author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d  /drivers/scsi/sg.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'drivers/scsi/sg.c')
-rw-r--r--  drivers/scsi/sg.c  3092
1 file changed, 3092 insertions, 0 deletions
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
new file mode 100644
index 00000000000..fd72d73bb24
--- /dev/null
+++ b/drivers/scsi/sg.c
@@ -0,0 +1,3092 @@
+/*
+ * History:
+ * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
+ * to allow user process control of SCSI devices.
+ * Development Sponsored by Killy Corp. NY NY
+ *
+ * Original driver (sg.c):
+ * Copyright (C) 1992 Lawrence Foard
+ * Version 2 and 3 extensions to driver:
+ * Copyright (C) 1998 - 2005 Douglas Gilbert
+ *
+ * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ */
+
+static int sg_version_num = 30532; /* 2 digits for each component */
+#define SG_VERSION_STR "3.5.32"
+
+/*
+ * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
+ * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
+ * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
+ * (otherwise the macros compile to empty statements).
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/mtio.h>
+#include <linux/ioctl.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+#include <linux/moduleparam.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/cdev.h>
+#include <linux/seq_file.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+
+#include "scsi.h"
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_driver.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/sg.h>
+
+#include "scsi_logging.h"
+
+#ifdef CONFIG_SCSI_PROC_FS
+#include <linux/proc_fs.h>
+static char *sg_version_date = "20050117";
+
+static int sg_proc_init(void);
+static void sg_proc_cleanup(void);
+#endif
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif /* LINUX_VERSION_CODE */
+
+#define SG_ALLOW_DIO_DEF 0
+#define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
+
+#define SG_MAX_DEVS 32768
+
+/*
+ * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
+ * Then when using 32 bit integers x * m may overflow during the calculation.
+ * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
+ * calculates the same, but prevents the overflow when both m and d
+ * are "small" numbers (like HZ and USER_HZ).
+ * Of course an overflow is unavoidable if the result of muldiv doesn't fit
+ * in 32 bits.
+ */
+#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
+
+#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
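+
+/*
+ * Worked example (illustrative sketch only; HZ=1000 and USER_HZ=100 are
+ * assumed as typical values and are not taken from this file): converting
+ * x = 50,000,000 USER_HZ ticks to jiffies with MULDIV(x, HZ, USER_HZ).
+ *   naive:   x * HZ = 50000000 * 1000 = 5.0e10            -> overflows a 32 bit int
+ *   MULDIV:  ((x % USER_HZ) * HZ) / USER_HZ = ((50000000 % 100) * 1000) / 100 = 0
+ *            (x / USER_HZ) * HZ             = (50000000 / 100) * 1000 = 500000000
+ *   total = 500,000,000 jiffies, which still fits in a 32 bit int; only a
+ *   result that itself exceeds 32 bits would overflow, as noted above.
+ */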
+
+int sg_big_buff = SG_DEF_RESERVED_SIZE;
+/* N.B. This variable is readable and writeable via
+ /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
+ of this size (or less if there is not enough memory) will be reserved
+ for use by this file descriptor. [Deprecated usage: this variable is also
+ readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
+ the kernel (i.e. it is not a module).] */
+static int def_reserved_size = -1; /* picks up init parameter */
+static int sg_allow_dio = SG_ALLOW_DIO_DEF;
+
+#define SG_SECTOR_SZ 512
+#define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
+
+#define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */
+
+static int sg_add(struct class_device *);
+static void sg_remove(struct class_device *);
+
+static Scsi_Request *dummy_cmdp; /* only used for sizeof */
+
+static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock
+ file descriptor list for device */
+
+static struct class_interface sg_interface = {
+ .add = sg_add,
+ .remove = sg_remove,
+};
+
+typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
+ unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
+ unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
+ unsigned bufflen; /* Size of (aggregate) data buffer */
+ unsigned b_malloc_len; /* actual len malloc'ed in buffer */
+ void *buffer; /* Data buffer or scatter list (k_use_sg>0) */
+ char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
+ unsigned char cmd_opcode; /* first byte of command */
+} Sg_scatter_hold;
+
+struct sg_device; /* forward declarations */
+struct sg_fd;
+
+typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
+ Scsi_Request *my_cmdp; /* != 0 when request with lower levels */
+ struct sg_request *nextrp; /* NULL -> tail request (slist) */
+ struct sg_fd *parentfp; /* NULL -> not in use */
+ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
+ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
+ unsigned char sense_b[sizeof (dummy_cmdp->sr_sense_buffer)];
+ char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
+ char orphan; /* 1 -> drop on sight, 0 -> normal */
+ char sg_io_owned; /* 1 -> packet belongs to SG_IO */
+ volatile char done; /* 0->before bh, 1->before read, 2->read */
+} Sg_request;
+
+typedef struct sg_fd { /* holds the state of a file descriptor */
+ struct sg_fd *nextfp; /* NULL when last opened fd on this device */
+ struct sg_device *parentdp; /* owning device */
+ wait_queue_head_t read_wait; /* queue read until command done */
+ rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
+ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
+ Sg_scatter_hold reserve; /* buffer held for this file descriptor */
+ unsigned save_scat_len; /* original length of trunc. scat. element */
+ Sg_request *headrp; /* head of request slist, NULL->empty */
+ struct fasync_struct *async_qp; /* used by asynchronous notification */
+ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
+ char low_dma; /* as in parent but possibly overridden to 1 */
+ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
+ volatile char closed; /* 1 -> fd closed but request(s) outstanding */
+ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
+ char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
+ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
+ char mmap_called; /* 0 -> mmap() never called on this fd */
+} Sg_fd;
+
+typedef struct sg_device { /* holds the state of each scsi generic device */
+ struct scsi_device *device;
+ wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
+ int sg_tablesize; /* adapter's max scatter-gather table size */
+ Sg_fd *headfp; /* first open fd belonging to this device */
+ volatile char detached; /* 0->attached, 1->detached pending removal */
+ volatile char exclude; /* opened for exclusive access */
+ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
+ struct gendisk *disk;
+ struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
+} Sg_device;
+
+static int sg_fasync(int fd, struct file *filp, int mode);
+static void sg_cmd_done(Scsi_Cmnd * SCpnt); /* tasklet or soft irq callback */
+static int sg_start_req(Sg_request * srp);
+static void sg_finish_rem_req(Sg_request * srp);
+static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
+static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
+ int tablesize);
+static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
+ Sg_request * srp);
+static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
+ int blocking, int read_only, Sg_request ** o_srp);
+static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ unsigned char *cmnd, int timeout, int blocking);
+static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
+ int wr_xf, int *countp, unsigned char __user **up);
+static int sg_write_xfer(Sg_request * srp);
+static int sg_read_xfer(Sg_request * srp);
+static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
+static void sg_remove_scat(Sg_scatter_hold * schp);
+static void sg_build_reserve(Sg_fd * sfp, int req_size);
+static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
+static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
+static char *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
+static void sg_page_free(char *buff, int size);
+static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
+static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
+static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
+static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+static Sg_request *sg_add_request(Sg_fd * sfp);
+static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+static int sg_res_in_use(Sg_fd * sfp);
+static int sg_allow_access(unsigned char opcode, char dev_type);
+static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
+static Sg_device *sg_get_dev(int dev);
+static inline unsigned char *sg_scatg2virt(const struct scatterlist *sclp);
+#ifdef CONFIG_SCSI_PROC_FS
+static int sg_last_dev(void);
+#endif
+
+static Sg_device **sg_dev_arr = NULL;
+static int sg_dev_max;
+static int sg_nr_dev;
+
+#define SZ_SG_HEADER sizeof(struct sg_header)
+#define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
+#define SZ_SG_IOVEC sizeof(sg_iovec_t)
+#define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
+
+static int
+sg_open(struct inode *inode, struct file *filp)
+{
+ int dev = iminor(inode);
+ int flags = filp->f_flags;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ int res;
+ int retval;
+
+ nonseekable_open(inode, filp);
+ SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
+ sdp = sg_get_dev(dev);
+ if ((!sdp) || (!sdp->device))
+ return -ENXIO;
+ if (sdp->detached)
+ return -ENODEV;
+
+ /* This driver's module count bumped by fops_get in <linux/fs.h> */
+ /* Prevent the device driver from vanishing while we sleep */
+ retval = scsi_device_get(sdp->device);
+ if (retval)
+ return retval;
+
+ if (!((flags & O_NONBLOCK) ||
+ scsi_block_when_processing_errors(sdp->device))) {
+ retval = -ENXIO;
+ /* we are in error recovery for this device */
+ goto error_out;
+ }
+
+ if (flags & O_EXCL) {
+ if (O_RDONLY == (flags & O_ACCMODE)) {
+ retval = -EPERM; /* Can't lock it with read only access */
+ goto error_out;
+ }
+ if (sdp->headfp && (flags & O_NONBLOCK)) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ res = 0;
+ __wait_event_interruptible(sdp->o_excl_wait,
+ ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
+ if (res) {
+ retval = res; /* -ERESTARTSYS because signal hit process */
+ goto error_out;
+ }
+ } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
+ if (flags & O_NONBLOCK) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ res = 0;
+ __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
+ res);
+ if (res) {
+ retval = res; /* -ERESTARTSYS because signal hit process */
+ goto error_out;
+ }
+ }
+ if (sdp->detached) {
+ retval = -ENODEV;
+ goto error_out;
+ }
+ if (!sdp->headfp) { /* no existing opens on this device */
+ sdp->sgdebug = 0;
+ sdp->sg_tablesize = sdp->device->host->sg_tablesize;
+ }
+ if ((sfp = sg_add_sfp(sdp, dev)))
+ filp->private_data = sfp;
+ else {
+ if (flags & O_EXCL)
+ sdp->exclude = 0; /* undo if error */
+ retval = -ENOMEM;
+ goto error_out;
+ }
+ return 0;
+
+ error_out:
+ scsi_device_put(sdp->device);
+ return retval;
+}
+
+/* Following function was formerly called 'sg_close' */
+static int
+sg_release(struct inode *inode, struct file *filp)
+{
+ Sg_device *sdp;
+ Sg_fd *sfp;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
+ sg_fasync(-1, filp, 0); /* remove filp from async notification list */
+ if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
+ if (!sdp->detached) {
+ scsi_device_put(sdp->device);
+ }
+ sdp->exclude = 0;
+ wake_up_interruptible(&sdp->o_excl_wait);
+ }
+ return 0;
+}
+
+static ssize_t
+sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
+{
+ int res;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ Sg_request *srp;
+ int req_pack_id = -1;
+ struct sg_header old_hdr;
+ sg_io_hdr_t new_hdr;
+ sg_io_hdr_t *hp;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
+ sdp->disk->disk_name, (int) count));
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+ if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
+ if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
+ return -EFAULT;
+ if (old_hdr.reply_len < 0) {
+ if (count >= SZ_SG_IO_HDR) {
+ if (__copy_from_user
+ (&new_hdr, buf, SZ_SG_IO_HDR))
+ return -EFAULT;
+ req_pack_id = new_hdr.pack_id;
+ }
+ } else
+ req_pack_id = old_hdr.pack_id;
+ }
+ srp = sg_get_rq_mark(sfp, req_pack_id);
+ if (!srp) { /* now wait on packet to arrive */
+ if (sdp->detached)
+ return -ENODEV;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+ while (1) {
+ res = 0; /* following is a macro that beats race condition */
+ __wait_event_interruptible(sfp->read_wait,
+ (sdp->detached || (srp = sg_get_rq_mark(sfp, req_pack_id))),
+ res);
+ if (sdp->detached)
+ return -ENODEV;
+ if (0 == res)
+ break;
+ return res; /* -ERESTARTSYS because signal hit process */
+ }
+ }
+ if (srp->header.interface_id != '\0')
+ return sg_new_read(sfp, buf, count, srp);
+
+ hp = &srp->header;
+ memset(&old_hdr, 0, SZ_SG_HEADER);
+ old_hdr.reply_len = (int) hp->timeout;
+ old_hdr.pack_len = old_hdr.reply_len; /* very old, strange behaviour */
+ old_hdr.pack_id = hp->pack_id;
+ old_hdr.twelve_byte =
+ ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
+ old_hdr.target_status = hp->masked_status;
+ old_hdr.host_status = hp->host_status;
+ old_hdr.driver_status = hp->driver_status;
+ if ((CHECK_CONDITION & hp->masked_status) ||
+ (DRIVER_SENSE & hp->driver_status))
+ memcpy(old_hdr.sense_buffer, srp->sense_b,
+ sizeof (old_hdr.sense_buffer));
+ switch (hp->host_status) {
+ /* This setup of 'result' is for backward compatibility and is best
+ ignored by the user who should use target, host + driver status */
+ case DID_OK:
+ case DID_PASSTHROUGH:
+ case DID_SOFT_ERROR:
+ old_hdr.result = 0;
+ break;
+ case DID_NO_CONNECT:
+ case DID_BUS_BUSY:
+ case DID_TIME_OUT:
+ old_hdr.result = EBUSY;
+ break;
+ case DID_BAD_TARGET:
+ case DID_ABORT:
+ case DID_PARITY:
+ case DID_RESET:
+ case DID_BAD_INTR:
+ old_hdr.result = EIO;
+ break;
+ case DID_ERROR:
+ old_hdr.result = (srp->sense_b[0] == 0 &&
+ hp->masked_status == GOOD) ? 0 : EIO;
+ break;
+ default:
+ old_hdr.result = EIO;
+ break;
+ }
+
+ /* Now copy the result back to the user buffer. */
+ if (count >= SZ_SG_HEADER) {
+ if (__copy_to_user(buf, &old_hdr, SZ_SG_HEADER))
+ return -EFAULT;
+ buf += SZ_SG_HEADER;
+ if (count > old_hdr.reply_len)
+ count = old_hdr.reply_len;
+ if (count > SZ_SG_HEADER) {
+ if ((res =
+ sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)))
+ return -EFAULT;
+ }
+ } else
+ count = (old_hdr.result == 0) ? 0 : -EIO;
+ sg_finish_rem_req(srp);
+ return count;
+}
+
+static ssize_t
+sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
+{
+ sg_io_hdr_t *hp = &srp->header;
+ int err = 0;
+ int len;
+
+ if (count < SZ_SG_IO_HDR) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ hp->sb_len_wr = 0;
+ if ((hp->mx_sb_len > 0) && hp->sbp) {
+ if ((CHECK_CONDITION & hp->masked_status) ||
+ (DRIVER_SENSE & hp->driver_status)) {
+ int sb_len = sizeof (dummy_cmdp->sr_sense_buffer);
+ sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
+ len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
+ len = (len > sb_len) ? sb_len : len;
+ if (copy_to_user(hp->sbp, srp->sense_b, len)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+ hp->sb_len_wr = len;
+ }
+ }
+ if (hp->masked_status || hp->host_status || hp->driver_status)
+ hp->info |= SG_INFO_CHECK;
+ if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
+ err = -EFAULT;
+ goto err_out;
+ }
+ err = sg_read_xfer(srp);
+ err_out:
+ sg_finish_rem_req(srp);
+ return (0 == err) ? count : err;
+}
+
+static ssize_t
+sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+{
+ int mxsize, cmd_size, k;
+ int input_size, blocking;
+ unsigned char opcode;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ Sg_request *srp;
+ struct sg_header old_hdr;
+ sg_io_hdr_t *hp;
+ unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
+ sdp->disk->disk_name, (int) count));
+ if (sdp->detached)
+ return -ENODEV;
+ if (!((filp->f_flags & O_NONBLOCK) ||
+ scsi_block_when_processing_errors(sdp->device)))
+ return -ENXIO;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT; /* protects following copy_from_user()s + get_user()s */
+ if (count < SZ_SG_HEADER)
+ return -EIO;
+ if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
+ return -EFAULT;
+ blocking = !(filp->f_flags & O_NONBLOCK);
+ if (old_hdr.reply_len < 0)
+ return sg_new_write(sfp, buf, count, blocking, 0, NULL);
+ if (count < (SZ_SG_HEADER + 6))
+ return -EIO; /* The minimum scsi command length is 6 bytes. */
+
+ if (!(srp = sg_add_request(sfp))) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
+ return -EDOM;
+ }
+ buf += SZ_SG_HEADER;
+ __get_user(opcode, buf);
+ if (sfp->next_cmd_len > 0) {
+ if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
+ sfp->next_cmd_len = 0;
+ sg_remove_request(sfp, srp);
+ return -EIO;
+ }
+ cmd_size = sfp->next_cmd_len;
+		sfp->next_cmd_len = 0;	/* reset so only this write() is affected */
+ } else {
+ cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
+ if ((opcode >= 0xc0) && old_hdr.twelve_byte)
+ cmd_size = 12;
+ }
+ SCSI_LOG_TIMEOUT(4, printk(
+ "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
+/* Determine buffer size. */
+ input_size = count - cmd_size;
+ mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
+ mxsize -= SZ_SG_HEADER;
+ input_size -= SZ_SG_HEADER;
+ if (input_size < 0) {
+ sg_remove_request(sfp, srp);
+ return -EIO; /* User did not pass enough bytes for this command. */
+ }
+ hp = &srp->header;
+ hp->interface_id = '\0'; /* indicator of old interface tunnelled */
+ hp->cmd_len = (unsigned char) cmd_size;
+ hp->iovec_count = 0;
+ hp->mx_sb_len = 0;
+ if (input_size > 0)
+ hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
+ SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
+ else
+ hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
+ hp->dxfer_len = mxsize;
+ hp->dxferp = (char __user *)buf + cmd_size;
+ hp->sbp = NULL;
+ hp->timeout = old_hdr.reply_len; /* structure abuse ... */
+ hp->flags = input_size; /* structure abuse ... */
+ hp->pack_id = old_hdr.pack_id;
+ hp->usr_ptr = NULL;
+ if (__copy_from_user(cmnd, buf, cmd_size))
+ return -EFAULT;
+ /*
+ * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
+ * but it is possible that the app intended SG_DXFER_TO_DEV, because there
+ * is a non-zero input_size, so emit a warning.
+ */
+ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
+ if (printk_ratelimit())
+ printk(KERN_WARNING
+ "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
+ "guessing data in;\n" KERN_WARNING " "
+ "program %s not setting count and/or reply_len properly\n",
+ old_hdr.reply_len - (int)SZ_SG_HEADER,
+ input_size, (unsigned int) cmnd[0],
+ current->comm);
+ k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
+ return (k < 0) ? k : count;
+}
+
+static ssize_t
+sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
+ int blocking, int read_only, Sg_request ** o_srp)
+{
+ int k;
+ Sg_request *srp;
+ sg_io_hdr_t *hp;
+ unsigned char cmnd[sizeof (dummy_cmdp->sr_cmnd)];
+ int timeout;
+ unsigned long ul_timeout;
+
+ if (count < SZ_SG_IO_HDR)
+ return -EINVAL;
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT; /* protects following copy_from_user()s + get_user()s */
+
+ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
+ if (!(srp = sg_add_request(sfp))) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
+ return -EDOM;
+ }
+ hp = &srp->header;
+ if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
+ sg_remove_request(sfp, srp);
+ return -EFAULT;
+ }
+ if (hp->interface_id != 'S') {
+ sg_remove_request(sfp, srp);
+ return -ENOSYS;
+ }
+ if (hp->flags & SG_FLAG_MMAP_IO) {
+ if (hp->dxfer_len > sfp->reserve.bufflen) {
+ sg_remove_request(sfp, srp);
+ return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
+ }
+ if (hp->flags & SG_FLAG_DIRECT_IO) {
+ sg_remove_request(sfp, srp);
+ return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
+ }
+ if (sg_res_in_use(sfp)) {
+ sg_remove_request(sfp, srp);
+ return -EBUSY; /* reserve buffer already being used */
+ }
+ }
+ ul_timeout = msecs_to_jiffies(srp->header.timeout);
+ timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
+ if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
+ sg_remove_request(sfp, srp);
+ return -EMSGSIZE;
+ }
+ if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
+ sg_remove_request(sfp, srp);
+ return -EFAULT; /* protects following copy_from_user()s + get_user()s */
+ }
+ if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
+ sg_remove_request(sfp, srp);
+ return -EFAULT;
+ }
+ if (read_only &&
+ (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
+ sg_remove_request(sfp, srp);
+ return -EPERM;
+ }
+ k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
+ if (k < 0)
+ return k;
+ if (o_srp)
+ *o_srp = srp;
+ return count;
+}
+
+static int
+sg_common_write(Sg_fd * sfp, Sg_request * srp,
+ unsigned char *cmnd, int timeout, int blocking)
+{
+ int k;
+ Scsi_Request *SRpnt;
+ Sg_device *sdp = sfp->parentdp;
+ sg_io_hdr_t *hp = &srp->header;
+ request_queue_t *q;
+
+ srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
+ hp->status = 0;
+ hp->masked_status = 0;
+ hp->msg_status = 0;
+ hp->info = 0;
+ hp->host_status = 0;
+ hp->driver_status = 0;
+ hp->resid = 0;
+ SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
+ (int) cmnd[0], (int) hp->cmd_len));
+
+ if ((k = sg_start_req(srp))) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
+ sg_finish_rem_req(srp);
+ return k; /* probably out of space --> ENOMEM */
+ }
+ if ((k = sg_write_xfer(srp))) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
+ sg_finish_rem_req(srp);
+ return k;
+ }
+ if (sdp->detached) {
+ sg_finish_rem_req(srp);
+ return -ENODEV;
+ }
+ SRpnt = scsi_allocate_request(sdp->device, GFP_ATOMIC);
+ if (SRpnt == NULL) {
+ SCSI_LOG_TIMEOUT(1, printk("sg_write: no mem\n"));
+ sg_finish_rem_req(srp);
+ return -ENOMEM;
+ }
+
+ srp->my_cmdp = SRpnt;
+ q = SRpnt->sr_device->request_queue;
+ SRpnt->sr_request->rq_disk = sdp->disk;
+ SRpnt->sr_sense_buffer[0] = 0;
+ SRpnt->sr_cmd_len = hp->cmd_len;
+ SRpnt->sr_use_sg = srp->data.k_use_sg;
+ SRpnt->sr_sglist_len = srp->data.sglist_len;
+ SRpnt->sr_bufflen = srp->data.bufflen;
+ SRpnt->sr_underflow = 0;
+ SRpnt->sr_buffer = srp->data.buffer;
+ switch (hp->dxfer_direction) {
+ case SG_DXFER_TO_FROM_DEV:
+ case SG_DXFER_FROM_DEV:
+ SRpnt->sr_data_direction = SCSI_DATA_READ;
+ break;
+ case SG_DXFER_TO_DEV:
+ SRpnt->sr_data_direction = SCSI_DATA_WRITE;
+ break;
+ case SG_DXFER_UNKNOWN:
+ SRpnt->sr_data_direction = SCSI_DATA_UNKNOWN;
+ break;
+ default:
+ SRpnt->sr_data_direction = SCSI_DATA_NONE;
+ break;
+ }
+ SRpnt->upper_private_data = srp;
+ srp->data.k_use_sg = 0;
+ srp->data.sglist_len = 0;
+ srp->data.bufflen = 0;
+ srp->data.buffer = NULL;
+ hp->duration = jiffies; /* unit jiffies now, millisecs after done */
+/* Now send everything off to the mid-level. The next time we hear about this
+ packet is when sg_cmd_done() is called (i.e. a callback). */
+ scsi_do_req(SRpnt, (void *) cmnd,
+ (void *) SRpnt->sr_buffer, hp->dxfer_len,
+ sg_cmd_done, timeout, SG_DEFAULT_RETRIES);
+ /* dxfer_len overwrites SRpnt->sr_bufflen, hence need for b_malloc_len */
+ return 0;
+}
+
+static int
+sg_srp_done(Sg_request *srp, Sg_fd *sfp)
+{
+ unsigned long iflags;
+ int done;
+
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ done = srp->done;
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return done;
+}
+
+static int
+sg_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd_in, unsigned long arg)
+{
+ void __user *p = (void __user *)arg;
+ int __user *ip = p;
+ int result, val, read_only;
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ Sg_request *srp;
+ unsigned long iflags;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+ SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
+ sdp->disk->disk_name, (int) cmd_in));
+ read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
+
+ switch (cmd_in) {
+ case SG_IO:
+ {
+ int blocking = 1; /* ignore O_NONBLOCK flag */
+
+ if (sdp->detached)
+ return -ENODEV;
+ if (!scsi_block_when_processing_errors(sdp->device))
+ return -ENXIO;
+ if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
+ return -EFAULT;
+ result =
+ sg_new_write(sfp, p, SZ_SG_IO_HDR,
+ blocking, read_only, &srp);
+ if (result < 0)
+ return result;
+ srp->sg_io_owned = 1;
+ while (1) {
+ result = 0; /* following macro to beat race condition */
+ __wait_event_interruptible(sfp->read_wait,
+ (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
+ result);
+ if (sdp->detached)
+ return -ENODEV;
+ if (sfp->closed)
+ return 0; /* request packet dropped already */
+ if (0 == result)
+ break;
+ srp->orphan = 1;
+ return result; /* -ERESTARTSYS because signal hit process */
+ }
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+ srp->done = 2;
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
+ return (result < 0) ? result : 0;
+ }
+ case SG_SET_TIMEOUT:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ if (val < 0)
+ return -EIO;
+ if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
+ val = MULDIV (INT_MAX, USER_HZ, HZ);
+ sfp->timeout_user = val;
+ sfp->timeout = MULDIV (val, HZ, USER_HZ);
+
+ return 0;
+ case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
+ /* strange ..., for backward compatibility */
+ return sfp->timeout_user;
+ case SG_SET_FORCE_LOW_DMA:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ if (val) {
+ sfp->low_dma = 1;
+ if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ val = (int) sfp->reserve.bufflen;
+ sg_remove_scat(&sfp->reserve);
+ sg_build_reserve(sfp, val);
+ }
+ } else {
+ if (sdp->detached)
+ return -ENODEV;
+ sfp->low_dma = sdp->device->host->unchecked_isa_dma;
+ }
+ return 0;
+ case SG_GET_LOW_DMA:
+ return put_user((int) sfp->low_dma, ip);
+ case SG_GET_SCSI_ID:
+ if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
+ return -EFAULT;
+ else {
+ sg_scsi_id_t __user *sg_idp = p;
+
+ if (sdp->detached)
+ return -ENODEV;
+ __put_user((int) sdp->device->host->host_no,
+ &sg_idp->host_no);
+ __put_user((int) sdp->device->channel,
+ &sg_idp->channel);
+ __put_user((int) sdp->device->id, &sg_idp->scsi_id);
+ __put_user((int) sdp->device->lun, &sg_idp->lun);
+ __put_user((int) sdp->device->type, &sg_idp->scsi_type);
+ __put_user((short) sdp->device->host->cmd_per_lun,
+ &sg_idp->h_cmd_per_lun);
+ __put_user((short) sdp->device->queue_depth,
+ &sg_idp->d_queue_depth);
+ __put_user(0, &sg_idp->unused[0]);
+ __put_user(0, &sg_idp->unused[1]);
+ return 0;
+ }
+ case SG_SET_FORCE_PACK_ID:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->force_packid = val ? 1 : 0;
+ return 0;
+ case SG_GET_PACK_ID:
+ if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
+ return -EFAULT;
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (srp = sfp->headrp; srp; srp = srp->nextrp) {
+ if ((1 == srp->done) && (!srp->sg_io_owned)) {
+ read_unlock_irqrestore(&sfp->rq_list_lock,
+ iflags);
+ __put_user(srp->header.pack_id, ip);
+ return 0;
+ }
+ }
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ __put_user(-1, ip);
+ return 0;
+ case SG_GET_NUM_WAITING:
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
+ if ((1 == srp->done) && (!srp->sg_io_owned))
+ ++val;
+ }
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return put_user(val, ip);
+ case SG_GET_SG_TABLESIZE:
+ return put_user(sdp->sg_tablesize, ip);
+ case SG_SET_RESERVED_SIZE:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ if (val < 0)
+ return -EINVAL;
+ if (val != sfp->reserve.bufflen) {
+ if (sg_res_in_use(sfp) || sfp->mmap_called)
+ return -EBUSY;
+ sg_remove_scat(&sfp->reserve);
+ sg_build_reserve(sfp, val);
+ }
+ return 0;
+ case SG_GET_RESERVED_SIZE:
+ val = (int) sfp->reserve.bufflen;
+ return put_user(val, ip);
+ case SG_SET_COMMAND_Q:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->cmd_q = val ? 1 : 0;
+ return 0;
+ case SG_GET_COMMAND_Q:
+ return put_user((int) sfp->cmd_q, ip);
+ case SG_SET_KEEP_ORPHAN:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->keep_orphan = val;
+ return 0;
+ case SG_GET_KEEP_ORPHAN:
+ return put_user((int) sfp->keep_orphan, ip);
+ case SG_NEXT_CMD_LEN:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sfp->next_cmd_len = (val > 0) ? val : 0;
+ return 0;
+ case SG_GET_VERSION_NUM:
+ return put_user(sg_version_num, ip);
+ case SG_GET_ACCESS_COUNT:
+ /* faked - we don't have a real access count anymore */
+ val = (sdp->device ? 1 : 0);
+ return put_user(val, ip);
+ case SG_GET_REQUEST_TABLE:
+ if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
+ return -EFAULT;
+ else {
+ sg_req_info_t rinfo[SG_MAX_QUEUE];
+ Sg_request *srp;
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+ for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
+ ++val, srp = srp ? srp->nextrp : srp) {
+ memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
+ if (srp) {
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ rinfo[val].duration =
+ srp->done ? srp->header.duration :
+ jiffies_to_msecs(
+ jiffies - srp->header.duration);
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ }
+ }
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return (__copy_to_user(p, rinfo,
+ SZ_SG_REQ_INFO * SG_MAX_QUEUE) ? -EFAULT : 0);
+ }
+ case SG_EMULATED_HOST:
+ if (sdp->detached)
+ return -ENODEV;
+ return put_user(sdp->device->host->hostt->emulated, ip);
+ case SG_SCSI_RESET:
+ if (sdp->detached)
+ return -ENODEV;
+ if (filp->f_flags & O_NONBLOCK) {
+ if (test_bit(SHOST_RECOVERY,
+ &sdp->device->host->shost_state))
+ return -EBUSY;
+ } else if (!scsi_block_when_processing_errors(sdp->device))
+ return -EBUSY;
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ if (SG_SCSI_RESET_NOTHING == val)
+ return 0;
+ switch (val) {
+ case SG_SCSI_RESET_DEVICE:
+ val = SCSI_TRY_RESET_DEVICE;
+ break;
+ case SG_SCSI_RESET_BUS:
+ val = SCSI_TRY_RESET_BUS;
+ break;
+ case SG_SCSI_RESET_HOST:
+ val = SCSI_TRY_RESET_HOST;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ return -EACCES;
+ return (scsi_reset_provider(sdp->device, val) ==
+ SUCCESS) ? 0 : -EIO;
+ case SCSI_IOCTL_SEND_COMMAND:
+ if (sdp->detached)
+ return -ENODEV;
+ if (read_only) {
+ unsigned char opcode = WRITE_6;
+ Scsi_Ioctl_Command __user *siocp = p;
+
+ if (copy_from_user(&opcode, siocp->data, 1))
+ return -EFAULT;
+ if (!sg_allow_access(opcode, sdp->device->type))
+ return -EPERM;
+ }
+ return scsi_ioctl_send_command(sdp->device, p);
+ case SG_SET_DEBUG:
+ result = get_user(val, ip);
+ if (result)
+ return result;
+ sdp->sgdebug = (char) val;
+ return 0;
+ case SCSI_IOCTL_GET_IDLUN:
+ case SCSI_IOCTL_GET_BUS_NUMBER:
+ case SCSI_IOCTL_PROBE_HOST:
+ case SG_GET_TRANSFORM:
+ if (sdp->detached)
+ return -ENODEV;
+ return scsi_ioctl(sdp->device, cmd_in, p);
+ default:
+ if (read_only)
+ return -EPERM; /* don't know so take safe approach */
+ return scsi_ioctl(sdp->device, cmd_in, p);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+{
+ Sg_device *sdp;
+ Sg_fd *sfp;
+ struct scsi_device *sdev;
+
+ if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+ return -ENXIO;
+
+ sdev = sdp->device;
+ if (sdev->host->hostt->compat_ioctl) {
+ int ret;
+
+ ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);