Diffstat (limited to 'drivers/ata/libata-scsi.c')
-rw-r--r--   drivers/ata/libata-scsi.c   1896
1 files changed, 1192 insertions, 704 deletions
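Among other things, the patch below replaces the old class_device based link power management attribute with a regular device attribute backed by ata_scsi_lpm_store()/ata_scsi_lpm_show(), which accept the policy names max_performance, medium_power and min_power. As a rough, hedged illustration of how that attribute is driven from userspace (not part of the patch itself), the C sketch that follows writes a policy name to the sysfs file and reads it back; the host0 path is a placeholder for whichever SCSI host maps to the ATA port, and root privileges are assumed since the attribute is S_IRUGO | S_IWUSR.

/*
 * Illustrative userspace sketch only: exercise the
 * link_power_management_policy attribute added by this patch.
 * "host0" is a placeholder; adjust for the port under test.
 */
#include <stdio.h>

int main(void)
{
	const char *attr =
		"/sys/class/scsi_host/host0/link_power_management_policy";
	char current[32] = "";
	FILE *f;

	/* Write one of the names accepted by ata_scsi_lpm_store(). */
	f = fopen(attr, "w");
	if (!f) {
		perror("open for write");
		return 1;
	}
	fputs("min_power\n", f);
	fclose(f);

	/* Read back what ata_scsi_lpm_show() reports for the port. */
	f = fopen(attr, "r");
	if (!f) {
		perror("open for read");
		return 1;
	}
	if (fgets(current, sizeof(current), f))
		printf("current policy: %s", current);
	fclose(f);
	return 0;
}

Per ata_scsi_lpm_store() in the patch, a name that matches none of the policy strings fails with -EINVAL, and the write fails with -EOPNOTSUPP if any enabled device on the port has ATA_HORKAGE_NOLPM set.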
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 15795394b0a..72691fd9394 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1,7 +1,7 @@ /* * libata-scsi.c - helper library for ATA * - * Maintained by: Jeff Garzik <jgarzik@pobox.com> + * Maintained by: Tejun Heo <tj@kernel.org> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * @@ -33,9 +33,11 @@ * */ +#include <linux/slab.h> #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/spinlock.h> +#include <linux/export.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> @@ -46,10 +48,16 @@ #include <linux/libata.h> #include <linux/hdreg.h> #include <linux/uaccess.h> +#include <linux/suspend.h> +#include <asm/unaligned.h> #include "libata.h" +#include "libata-transport.h" -#define SECTOR_SIZE 512 +#define ATA_SCSI_RBUF_SIZE 4096 + +static DEFINE_SPINLOCK(ata_scsi_rbuf_lock); +static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE]; typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); @@ -57,9 +65,6 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); -static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, unsigned int lun); - #define RW_RECOVERY_MPAGE 0x1 #define RW_RECOVERY_MPAGE_LEN 12 @@ -99,91 +104,270 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = { 0, 30 /* extended self test time, see 05-359r1 */ }; -/* - * libata transport template. libata doesn't do real transport stuff. - * It just needs the eh_timed_out hook. - */ -static struct scsi_transport_template ata_scsi_transport_template = { - .eh_strategy_handler = ata_scsi_error, - .eh_timed_out = ata_scsi_timed_out, - .user_scan = ata_scsi_user_scan, +static const char *ata_lpm_policy_names[] = { + [ATA_LPM_UNKNOWN] = "max_performance", + [ATA_LPM_MAX_POWER] = "max_performance", + [ATA_LPM_MED_POWER] = "medium_power", + [ATA_LPM_MIN_POWER] = "min_power", }; +static ssize_t ata_scsi_lpm_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(device); + struct ata_port *ap = ata_shost_to_port(shost); + struct ata_link *link; + struct ata_device *dev; + enum ata_lpm_policy policy; + unsigned long flags; -static const struct { - enum link_pm value; - const char *name; -} link_pm_policy[] = { - { NOT_AVAILABLE, "max_performance" }, - { MIN_POWER, "min_power" }, - { MAX_PERFORMANCE, "max_performance" }, - { MEDIUM_POWER, "medium_power" }, -}; + /* UNKNOWN is internal state, iterate from MAX_POWER */ + for (policy = ATA_LPM_MAX_POWER; + policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) { + const char *name = ata_lpm_policy_names[policy]; -static const char *ata_scsi_lpm_get(enum link_pm policy) -{ - int i; + if (strncmp(name, buf, strlen(name)) == 0) + break; + } + if (policy == ARRAY_SIZE(ata_lpm_policy_names)) + return -EINVAL; - for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++) - if (link_pm_policy[i].value == policy) - return link_pm_policy[i].name; + spin_lock_irqsave(ap->lock, flags); - return NULL; + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, &ap->link, ENABLED) { + if (dev->horkage & ATA_HORKAGE_NOLPM) { + count = -EOPNOTSUPP; + goto out_unlock; + } + } + } + + ap->target_lpm_policy = policy; + ata_port_schedule_eh(ap); +out_unlock: + spin_unlock_irqrestore(ap->lock, flags); + 
return count; } -static ssize_t ata_scsi_lpm_put(struct class_device *class_dev, - const char *buf, size_t count) +static ssize_t ata_scsi_lpm_show(struct device *dev, + struct device_attribute *attr, char *buf) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ata_port *ap = ata_shost_to_port(shost); - enum link_pm policy = 0; - int i; - /* - * we are skipping array location 0 on purpose - this - * is because a value of NOT_AVAILABLE is displayed - * to the user as max_performance, but when the user - * writes "max_performance", they actually want the - * value to match MAX_PERFORMANCE. - */ - for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) { - const int len = strlen(link_pm_policy[i].name); - if (strncmp(link_pm_policy[i].name, buf, len) == 0 && - buf[len] == '\n') { - policy = link_pm_policy[i].value; + if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) + return -EINVAL; + + return snprintf(buf, PAGE_SIZE, "%s\n", + ata_lpm_policy_names[ap->target_lpm_policy]); +} +DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, + ata_scsi_lpm_show, ata_scsi_lpm_store); +EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); + +static ssize_t ata_scsi_park_show(struct device *device, + struct device_attribute *attr, char *buf) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap; + struct ata_link *link; + struct ata_device *dev; + unsigned long flags, now; + unsigned int uninitialized_var(msecs); + int rc = 0; + + ap = ata_shost_to_port(sdev->host); + + spin_lock_irqsave(ap->lock, flags); + dev = ata_scsi_find_dev(ap, sdev); + if (!dev) { + rc = -ENODEV; + goto unlock; + } + if (dev->flags & ATA_DFLAG_NO_UNLOAD) { + rc = -EOPNOTSUPP; + goto unlock; + } + + link = dev->link; + now = jiffies; + if (ap->pflags & ATA_PFLAG_EH_IN_PROGRESS && + link->eh_context.unloaded_mask & (1 << dev->devno) && + time_after(dev->unpark_deadline, now)) + msecs = jiffies_to_msecs(dev->unpark_deadline - now); + else + msecs = 0; + +unlock: + spin_unlock_irq(ap->lock); + + return rc ? rc : snprintf(buf, 20, "%u\n", msecs); +} + +static ssize_t ata_scsi_park_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap; + struct ata_device *dev; + long int input; + unsigned long flags; + int rc; + + rc = kstrtol(buf, 10, &input); + if (rc) + return rc; + if (input < -2) + return -EINVAL; + if (input > ATA_TMOUT_MAX_PARK) { + rc = -EOVERFLOW; + input = ATA_TMOUT_MAX_PARK; + } + + ap = ata_shost_to_port(sdev->host); + + spin_lock_irqsave(ap->lock, flags); + dev = ata_scsi_find_dev(ap, sdev); + if (unlikely(!dev)) { + rc = -ENODEV; + goto unlock; + } + if (dev->class != ATA_DEV_ATA) { + rc = -EOPNOTSUPP; + goto unlock; + } + + if (input >= 0) { + if (dev->flags & ATA_DFLAG_NO_UNLOAD) { + rc = -EOPNOTSUPP; + goto unlock; + } + + dev->unpark_deadline = ata_deadline(jiffies, input); + dev->link->eh_info.dev_action[dev->devno] |= ATA_EH_PARK; + ata_port_schedule_eh(ap); + complete(&ap->park_req_pending); + } else { + switch (input) { + case -1: + dev->flags &= ~ATA_DFLAG_NO_UNLOAD; + break; + case -2: + dev->flags |= ATA_DFLAG_NO_UNLOAD; break; } } - if (!policy) - return -EINVAL; +unlock: + spin_unlock_irqrestore(ap->lock, flags); - ata_lpm_schedule(ap, policy); - return count; + return rc ? 
rc : len; +} +DEVICE_ATTR(unload_heads, S_IRUGO | S_IWUSR, + ata_scsi_park_show, ata_scsi_park_store); +EXPORT_SYMBOL_GPL(dev_attr_unload_heads); + +static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) +{ + cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; + + scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq); } static ssize_t -ata_scsi_lpm_show(struct class_device *class_dev, char *buf) +ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { - struct Scsi_Host *shost = class_to_shost(class_dev); + struct Scsi_Host *shost = class_to_shost(dev); struct ata_port *ap = ata_shost_to_port(shost); - const char *policy = - ata_scsi_lpm_get(ap->pm_policy); + if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_store(ap, buf, count); + return -EINVAL; +} - if (!policy) - return -EINVAL; +static ssize_t +ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) + return ap->ops->em_show(ap, buf); + return -EINVAL; +} +DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR, + ata_scsi_em_message_show, ata_scsi_em_message_store); +EXPORT_SYMBOL_GPL(dev_attr_em_message); + +static ssize_t +ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct ata_port *ap = ata_shost_to_port(shost); + + return snprintf(buf, 23, "%d\n", ap->em_message_type); +} +DEVICE_ATTR(em_message_type, S_IRUGO, + ata_scsi_em_message_type_show, NULL); +EXPORT_SYMBOL_GPL(dev_attr_em_message_type); + +static ssize_t +ata_scsi_activity_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + + if (atadev && ap->ops->sw_activity_show && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) + return ap->ops->sw_activity_show(atadev, buf); + return -EINVAL; +} + +static ssize_t +ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); + enum sw_activity val; + int rc; - return snprintf(buf, 23, "%s\n", policy); + if (atadev && ap->ops->sw_activity_store && + (ap->flags & ATA_FLAG_SW_ACTIVITY)) { + val = simple_strtoul(buf, NULL, 0); + switch (val) { + case OFF: case BLINK_ON: case BLINK_OFF: + rc = ap->ops->sw_activity_store(atadev, val); + if (!rc) + return count; + else + return rc; + } + } + return -EINVAL; } -CLASS_DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, - ata_scsi_lpm_show, ata_scsi_lpm_put); -EXPORT_SYMBOL_GPL(class_device_attr_link_power_management_policy); +DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show, + ata_scsi_activity_store); +EXPORT_SYMBOL_GPL(dev_attr_sw_activity); + +struct device_attribute *ata_common_sdev_attrs[] = { + &dev_attr_unload_heads, + NULL +}; +EXPORT_SYMBOL_GPL(ata_common_sdev_attrs); -static void ata_scsi_invalid_field(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +static void ata_scsi_invalid_field(struct scsi_cmnd *cmd) { ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0); /* 
"Invalid field in cbd" */ - done(cmd); + cmd->scsi_done(cmd); } /** @@ -216,7 +400,37 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, } /** + * ata_scsi_unlock_native_capacity - unlock native capacity + * @sdev: SCSI device to adjust device capacity for + * + * This function is called if a partition on @sdev extends beyond + * the end of the device. It requests EH to unlock HPA. + * + * LOCKING: + * Defined by the SCSI layer. Might sleep. + */ +void ata_scsi_unlock_native_capacity(struct scsi_device *sdev) +{ + struct ata_port *ap = ata_shost_to_port(sdev->host); + struct ata_device *dev; + unsigned long flags; + + spin_lock_irqsave(ap->lock, flags); + + dev = ata_scsi_find_dev(ap, sdev); + if (dev && dev->n_sectors < dev->n_native_sectors) { + dev->flags |= ATA_DFLAG_UNLOCK_HPA; + dev->link->eh_info.action |= ATA_EH_RESET; + ata_port_schedule_eh(ap); + } + + spin_unlock_irqrestore(ap->lock, flags); + ata_port_wait_eh(ap); +} + +/** * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl + * @ap: target port * @sdev: SCSI device to get identify data for * @arg: User buffer area for identify data * @@ -226,9 +440,9 @@ int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev, * RETURNS: * Zero on success, negative errno on error. */ -static int ata_get_identity(struct scsi_device *sdev, void __user *arg) +static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev, + void __user *arg) { - struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev = ata_scsi_find_dev(ap, sdev); u16 __user *dst = arg; char buf[40]; @@ -287,7 +501,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) memset(scsi_cmd, 0, sizeof(scsi_cmd)); if (args[3]) { - argsize = SECTOR_SIZE * args[3]; + argsize = ATA_SECT_SIZE * args[3]; argbuf = kmalloc(argsize, GFP_KERNEL); if (argbuf == NULL) { rc = -ENOMEM; @@ -307,7 +521,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) scsi_cmd[0] = ATA_16; scsi_cmd[4] = args[2]; - if (args[0] == WIN_SMART) { /* hack -- ide driver does this too... */ + if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */ scsi_cmd[6] = args[3]; scsi_cmd[8] = args[1]; scsi_cmd[10] = 0x4f; @@ -320,7 +534,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... */ cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, - sensebuf, (10*HZ), 5, 0); + sensebuf, (10*HZ), 5, 0, NULL); if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ u8 *desc = sensebuf + 8; @@ -332,8 +546,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == 0 && - sshdr.asc == 0 && sshdr.ascq == 0) + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } @@ -406,7 +620,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) /* Good values for timeout and retries? Values below from scsi_ioctl_send_command() for default case... 
*/ cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0, - sensebuf, (10*HZ), 5, 0); + sensebuf, (10*HZ), 5, 0, NULL); if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ u8 *desc = sensebuf + 8; @@ -418,8 +632,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) struct scsi_sense_hdr sshdr; scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sshdr); - if (sshdr.sense_key == 0 && - sshdr.asc == 0 && sshdr.ascq == 0) + if (sshdr.sense_key == RECOVERED_ERROR && + sshdr.asc == 0 && sshdr.ascq == 0x1d) cmd_result &= ~SAM_STAT_CHECK_CONDITION; } @@ -448,25 +662,48 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) return rc; } -int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) +static int ata_ioc32(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_PIO_DMA) + return 1; + if (ap->pflags & ATA_PFLAG_PIO32) + return 1; + return 0; +} + +int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, + int cmd, void __user *arg) { int val = -EINVAL, rc = -EINVAL; + unsigned long flags; switch (cmd) { case ATA_IOC_GET_IO32: - val = 0; + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); if (copy_to_user(arg, &val, 1)) return -EFAULT; return 0; case ATA_IOC_SET_IO32: val = (unsigned long) arg; - if (val != 0) - return -EINVAL; - return 0; + rc = 0; + spin_lock_irqsave(ap->lock, flags); + if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { + if (val) + ap->pflags |= ATA_PFLAG_PIO32; + else + ap->pflags &= ~ATA_PFLAG_PIO32; + } else { + if (val != ata_ioc32(ap)) + rc = -EINVAL; + } + spin_unlock_irqrestore(ap->lock, flags); + return rc; case HDIO_GET_IDENTITY: - return ata_get_identity(scsidev, arg); + return ata_get_identity(ap, scsidev, arg); case HDIO_DRIVE_CMD: if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) @@ -485,12 +722,19 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) return rc; } +EXPORT_SYMBOL_GPL(ata_sas_scsi_ioctl); + +int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) +{ + return ata_sas_scsi_ioctl(ata_shost_to_port(scsidev->host), + scsidev, cmd, arg); +} +EXPORT_SYMBOL_GPL(ata_scsi_ioctl); /** * ata_scsi_qc_new - acquire new ata_queued_cmd reference * @dev: ATA device to which the new command is attached * @cmd: SCSI command that originated this ATA command - * @done: SCSI command completion function * * Obtain a reference to an unused ata_queued_cmd structure, * which is the basic libata structure representing a single @@ -507,21 +751,20 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg) * Command allocated, or %NULL if none available. 
*/ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev, - struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) + struct scsi_cmnd *cmd) { struct ata_queued_cmd *qc; qc = ata_qc_new_init(dev); if (qc) { qc->scsicmd = cmd; - qc->scsidone = done; + qc->scsidone = cmd->scsi_done; qc->sg = scsi_sglist(cmd); qc->n_elem = scsi_sg_count(cmd); } else { cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); - done(cmd); + cmd->scsi_done(cmd); } return qc; @@ -619,25 +862,24 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, /* Bad address mark */ {0x01, MEDIUM_ERROR, 0x13, 0x00}, // Address mark not found Address mark not found for data field /* TRK0 */ - {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error - /* Abort & !ICRC */ - {0x04, ABORTED_COMMAND, 0x00, 0x00}, // Aborted command Aborted command + {0x02, HARDWARE_ERROR, 0x00, 0x00}, // Track 0 not found Hardware error + /* Abort: 0x04 is not translated here, see below */ /* Media change request */ {0x08, NOT_READY, 0x04, 0x00}, // Media change request FIXME: faking offline - /* SRV */ - {0x10, ABORTED_COMMAND, 0x14, 0x00}, // ID not found Recorded entity not found - /* Media change */ - {0x08, NOT_READY, 0x04, 0x00}, // Media change FIXME: faking offline + /* SRV/IDNF */ + {0x10, ILLEGAL_REQUEST, 0x21, 0x00}, // ID not found Logical address out of range + /* MC */ + {0x20, UNIT_ATTENTION, 0x28, 0x00}, // Media Changed Not ready to ready change, medium may have changed /* ECC */ {0x40, MEDIUM_ERROR, 0x11, 0x04}, // Uncorrectable ECC error Unrecovered read error /* BBD - block marked bad */ - {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error + {0x80, MEDIUM_ERROR, 0x11, 0x04}, // Block marked bad Medium error, unrecovered read error {0xFF, 0xFF, 0xFF, 0xFF}, // END mark }; static const unsigned char stat_table[][4] = { /* Must be first because BUSY means no other bits valid */ {0x80, ABORTED_COMMAND, 0x47, 0x00}, // Busy, fake parity for now - {0x20, HARDWARE_ERROR, 0x00, 0x00}, // Device fault + {0x20, HARDWARE_ERROR, 0x44, 0x00}, // Device fault, internal target failure {0x08, ABORTED_COMMAND, 0x47, 0x00}, // Timed out in xfer, fake parity for now {0x04, RECOVERED_ERROR, 0x11, 0x00}, // Recovered ECC error Medium error, recovered {0xFF, 0xFF, 0xFF, 0xFF}, // END mark @@ -662,13 +904,13 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, goto translate_done; } } - /* No immediate match */ - if (verbose) - printk(KERN_WARNING "ata%u: no sense translation for " - "error 0x%02x\n", id, drv_err); } - /* Fall back to interpreting status bits */ + /* + * Fall back to interpreting status bits. Note that if the drv_err + * has only the ABRT bit set, we decode drv_stat. ABRT by itself + * is not descriptive enough. + */ for (i = 0; stat_table[i][0] != 0xFF; i++) { if (stat_table[i][0] & drv_stat) { *sk = stat_table[i][1]; @@ -677,13 +919,11 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, goto translate_done; } } - /* No error? Undecoded? */ - if (verbose) - printk(KERN_WARNING "ata%u: no sense translation for " - "status: 0x%02x\n", id, drv_stat); - /* We need a sensible error return here, which is tricky, and one - that won't cause people to do things like return a disk wrongly */ + /* + * We need a sensible error return here, which is tricky, and one + * that won't cause people to do things like return a disk wrongly. 
+ */ *sk = ABORTED_COMMAND; *asc = 0x00; *ascq = 0x00; @@ -704,7 +944,11 @@ static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, * block specified for the ATA pass through commands. Regardless * of whether the command errored or not, return a sense * block. Copy all controller registers into the sense - * block. Clear sense key, ASC & ASCQ if there is no error. + * block. If there was no error, we get the request from an ATA + * passthrough command, so we use the following sense data: + * sk = RECOVERED ERROR + * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE + * * * LOCKING: * None. @@ -730,6 +974,10 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature, &sb[1], &sb[2], &sb[3], verbose); sb[1] &= 0x0f; + } else { + sb[1] = RECOVERED_ERROR; + sb[2] = 0; + sb[3] = 0x1D; } /* @@ -773,7 +1021,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc) * @qc: Command that we are erroring out * * Generate sense block for a failed ATA command @qc. Descriptor - * format is used to accomodate LBA48 block address. + * format is used to accommodate LBA48 block address. * * LOCKING: * None. @@ -825,6 +1073,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) { sdev->use_10_for_rw = 1; sdev->use_10_for_ms = 1; + sdev->no_report_opcodes = 1; + sdev->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() callback and * it needs to see every deferred qc. Set dev_blocked to 1 to @@ -851,10 +1101,10 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev) */ static int atapi_drain_needed(struct request *rq) { - if (likely(!blk_pc_request(rq))) + if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) return 0; - if (!rq->data_len || (rq->cmd_flags & REQ_RW)) + if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE)) return 0; return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC; @@ -863,34 +1113,49 @@ static int atapi_drain_needed(struct request *rq) static int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) { + struct request_queue *q = sdev->request_queue; + + if (!ata_id_has_unload(dev->id)) + dev->flags |= ATA_DFLAG_NO_UNLOAD; + /* configure max sectors */ - blk_queue_max_sectors(sdev->request_queue, dev->max_sectors); + blk_queue_max_hw_sectors(q, dev->max_sectors); if (dev->class == ATA_DEV_ATAPI) { - struct request_queue *q = sdev->request_queue; void *buf; - /* set the min alignment and padding */ - blk_queue_update_dma_alignment(sdev->request_queue, - ATA_DMA_PAD_SZ - 1); - blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); + sdev->sector_size = ATA_SECT_SIZE; + + /* set DMA padding */ + blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); /* configure draining */ buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); if (!buf) { - ata_dev_printk(dev, KERN_ERR, - "drain buffer allocation failed\n"); + ata_dev_err(dev, "drain buffer allocation failed\n"); return -ENOMEM; } blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); } else { - /* ATA devices must be sector aligned */ - blk_queue_update_dma_alignment(sdev->request_queue, - ATA_SECT_SIZE - 1); + sdev->sector_size = ata_id_logical_sector_size(dev->id); sdev->manage_start_stop = 1; } + /* + * ata_pio_sectors() expects buffer for each sector to not cross + * page boundary. Enforce it by requiring buffers to be sector + * aligned, which works iff sector_size is not larger than + * PAGE_SIZE. ATAPI devices also need the alignment as + * IDENTIFY_PACKET is executed as ATA_PROT_PIO. 
+ */ + if (sdev->sector_size > PAGE_SIZE) + ata_dev_warn(dev, + "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", + sdev->sector_size); + + blk_queue_update_dma_alignment(q, sdev->sector_size - 1); + if (dev->flags & ATA_DFLAG_AN) set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); @@ -902,6 +1167,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); } + blk_queue_flush_queueable(q, false); + + dev->sdev = sdev; return 0; } @@ -971,26 +1239,25 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev) } /** - * ata_scsi_change_queue_depth - SCSI callback for queue depth config + * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth + * @ap: ATA port to which the device change the queue depth * @sdev: SCSI device to configure queue depth for * @queue_depth: new queue depth + * @reason: calling context * - * This is libata standard hostt->change_queue_depth callback. - * SCSI will call into this callback when user tries to set queue - * depth via sysfs. + * libsas and libata have different approaches for associating a sdev to + * its ata_port. * - * LOCKING: - * SCSI layer (we don't care) - * - * RETURNS: - * Newly configured queue depth. */ -int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) +int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, + int queue_depth, int reason) { - struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev; unsigned long flags; + if (reason != SCSI_QDEPTH_DEFAULT) + return -EOPNOTSUPP; + if (queue_depth < 1 || queue_depth == sdev->queue_depth) return sdev->queue_depth; @@ -1019,21 +1286,28 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) return queue_depth; } -/* XXX: for spindown warning */ -static void ata_delayed_done_timerfn(unsigned long arg) -{ - struct scsi_cmnd *scmd = (void *)arg; - - scmd->scsi_done(scmd); -} - -/* XXX: for spindown warning */ -static void ata_delayed_done(struct scsi_cmnd *scmd) +/** + * ata_scsi_change_queue_depth - SCSI callback for queue depth config + * @sdev: SCSI device to configure queue depth for + * @queue_depth: new queue depth + * @reason: calling context + * + * This is libata standard hostt->change_queue_depth callback. + * SCSI will call into this callback when user tries to set queue + * depth via sysfs. + * + * LOCKING: + * SCSI layer (we don't care) + * + * RETURNS: + * Newly configured queue depth. + */ +int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth, + int reason) { - static struct timer_list timer; + struct ata_port *ap = ata_shost_to_port(sdev->host); - setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd); - mod_timer(&timer, jiffies + 5 * HZ); + return __ata_change_queue_depth(ap, sdev, queue_depth, reason); } /** @@ -1070,12 +1344,6 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) if (((cdb[4] >> 4) & 0xf) != 0) goto invalid_fld; /* power conditions not supported */ - if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) { - /* the device lacks PM support, finish without doing anything */ - scmd->result = SAM_STAT_GOOD; - return 1; - } - if (cdb[4] & 0x1) { tf->nsect = 1; /* 1 sector, lba=0 */ @@ -1095,32 +1363,16 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) tf->command = ATA_CMD_VERIFY; /* READ VERIFY */ } else { - /* XXX: This is for backward compatibility, will be - * removed. Read Documentation/feature-removal-schedule.txt - * for more info. 
+ /* Some odd clown BIOSen issue spindown on power off (ACPI S4 + * or S5) causing some drives to spin up and down again. */ - if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && - (system_state == SYSTEM_HALT || - system_state == SYSTEM_POWER_OFF)) { - static unsigned long warned; - - if (!test_and_set_bit(0, &warned)) { - ata_dev_printk(qc->dev, KERN_WARNING, - "DISK MIGHT NOT BE SPUN DOWN PROPERLY. " - "UPDATE SHUTDOWN UTILITY\n"); - ata_dev_printk(qc->dev, KERN_WARNING, - "For more info, visit " - "http://linux-ata.org/shutdown.html\n"); - - /* ->scsi_done is not used, use it for - * delayed completion. - */ - scmd->scsi_done = qc->scsidone; - qc->scsidone = ata_delayed_done; - } - scmd->result = SAM_STAT_GOOD; - return 1; - } + if ((qc->ap->flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) && + system_state == SYSTEM_POWER_OFF) + goto skip; + + if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) && + system_entering_hibernation()) + goto skip; /* Issue ATA STANDBY IMMEDIATE command */ tf->command = ATA_CMD_STANDBYNOW1; @@ -1135,10 +1387,13 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc) return 0; -invalid_fld: + invalid_fld: ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); /* "Invalid field in cbd" */ return 1; + skip: + scmd->result = SAM_STAT_GOOD; + return 1; } @@ -1423,7 +1678,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) if (unlikely(scmd->cmd_len < 10)) goto invalid_fld; scsi_10_lba_len(cdb, &block, &n_block); - if (unlikely(cdb[1] & (1 << 3))) + if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; break; case READ_6: @@ -1443,7 +1698,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) if (unlikely(scmd->cmd_len < 16)) goto invalid_fld; scsi_16_lba_len(cdb, &block, &n_block); - if (unlikely(cdb[1] & (1 << 3))) + if (cdb[1] & (1 << 3)) tf_flags |= ATA_TFLAG_FUA; break; default: @@ -1463,7 +1718,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) goto nothing_to_do; qc->flags |= ATA_QCFLAG_IO; - qc->nbytes = n_block * ATA_SECT_SIZE; + qc->nbytes = n_block * scmd->device->sector_size; rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, qc->tag); @@ -1497,10 +1752,12 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) /* For ATA pass thru (SAT) commands, generate a sense block if * user mandated it or if there's an error. Note that if we - * generate because the user forced us to, a check condition - * is generated and the ATA register values are returned + * generate because the user forced us to [CK_COND =1], a check + * condition is generated and the ATA register values are returned * whether the command completed successfully or not. If there - * was no error, SK, ASC and ASCQ will all be zero. 
+ * was no error, we use the following sense data: + * sk = RECOVERED ERROR + * asc,ascq = ATA PASS-THROUGH INFORMATION AVAILABLE */ if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && ((cdb[2] & 0x20) || need_sense)) { @@ -1519,14 +1776,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) } } - /* XXX: track spindown state for spindown skipping and warning */ - if (unlikely(qc->tf.command == ATA_CMD_STANDBY || - qc->tf.command == ATA_CMD_STANDBYNOW1)) - qc->dev->flags |= ATA_DFLAG_SPUNDOWN; - else if (likely(system_state != SYSTEM_HALT && - system_state != SYSTEM_POWER_OFF)) - qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN; - if (need_sense && !ap->ops->error_handler) ata_dump_status(ap->print_id, &qc->result_tf); @@ -1539,7 +1788,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) * ata_scsi_translate - Translate then issue SCSI command to ATA device * @dev: ATA device to which the command is addressed * @cmd: SCSI command to execute - * @done: SCSI command completion function * @xlat_func: Actor which translates @cmd to an ATA taskfile * * Our ->queuecommand() function has decided that the SCSI @@ -1563,7 +1811,6 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc) * needs to be deferred. */ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *), ata_xlat_func_t xlat_func) { struct ata_port *ap = dev->link->ap; @@ -1572,7 +1819,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, VPRINTK("ENTER\n"); - qc = ata_scsi_qc_new(dev, cmd, done); + qc = ata_scsi_qc_new(dev, cmd); if (!qc) goto err_mem; @@ -1580,8 +1827,7 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, if (cmd->sc_data_direction == DMA_FROM_DEVICE || cmd->sc_data_direction == DMA_TO_DEVICE) { if (unlikely(scsi_bufflen(cmd) < 1)) { - ata_dev_printk(dev, KERN_WARNING, - "WARNING: zero len r/w req\n"); + ata_dev_warn(dev, "WARNING: zero len r/w req\n"); goto err_did; } @@ -1608,14 +1854,14 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd, early_finish: ata_qc_free(qc); - qc->scsidone(cmd); + cmd->scsi_done(cmd); DPRINTK("EXIT - early finish (good or error)\n"); return 0; err_did: ata_qc_free(qc); cmd->result = (DID_ERROR << 16); - qc->scsidone(cmd); + cmd->scsi_done(cmd); err_mem: DPRINTK("EXIT - internal\n"); return 0; @@ -1632,52 +1878,48 @@ defer: /** * ata_scsi_rbuf_get - Map response buffer. * @cmd: SCSI command containing buffer to be mapped. - * @buf_out: Pointer to mapped area. + * @flags: unsigned long variable to store irq enable status + * @copy_in: copy in from user buffer * - * Maps buffer contained within SCSI command @cmd. + * Prepare buffer for simulated SCSI commands. * * LOCKING: - * spin_lock_irqsave(host lock) + * spin_lock_irqsave(ata_scsi_rbuf_lock) on success * * RETURNS: - * Length of response buffer. + * Pointer to response buffer. 
*/ - -static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out) +static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in, + unsigned long *flags) { - u8 *buf; - unsigned int buflen; + spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags); - struct scatterlist *sg = scsi_sglist(cmd); - - if (sg) { - buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; - buflen = sg->length; - } else { - buf = NULL; - buflen = 0; - } - - *buf_out = buf; - return buflen; + memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE); + if (copy_in) + sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), + ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE); + return ata_scsi_rbuf; } /** * ata_scsi_rbuf_put - Unmap response buffer. * @cmd: SCSI command containing buffer to be unmapped. - * @buf: buffer to unmap + * @copy_out: copy out result + * @flags: @flags passed to ata_scsi_rbuf_get() * - * Unmaps response buffer contained within @cmd. + * Returns rbuf buffer. The result is copied to @cmd's buffer if + * @copy_back is true. * * LOCKING: - * spin_lock_irqsave(host lock) + * Unlocks ata_scsi_rbuf_lock. */ - -static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) +static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out, + unsigned long *flags) { - struct scatterlist *sg = scsi_sglist(cmd); - if (sg) - kunmap_atomic(buf - sg->offset, KM_IRQ0); + if (copy_out) + sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd), + ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE); + spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags); } /** @@ -1695,24 +1937,17 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) * LOCKING: * spin_lock_irqsave(host lock) */ - -void ata_scsi_rbuf_fill(struct ata_scsi_args *args, - unsigned int (*actor) (struct ata_scsi_args *args, - u8 *rbuf, unsigned int buflen)) +static void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf)) { u8 *rbuf; - unsigned int buflen, rc; + unsigned int rc; struct scsi_cmnd *cmd = args->cmd; unsigned long flags; - local_irq_save(flags); - - buflen = ata_scsi_rbuf_get(cmd, &rbuf); - memset(rbuf, 0, buflen); - rc = actor(args, rbuf, buflen); - ata_scsi_rbuf_put(cmd, rbuf); - - local_irq_restore(flags); + rbuf = ata_scsi_rbuf_get(cmd, false, &flags); + rc = actor(args, rbuf); + ata_scsi_rbuf_put(cmd, rc == 0, &flags); if (rc == 0) cmd->result = SAM_STAT_GOOD; @@ -1720,26 +1955,9 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, } /** - * ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer - * @idx: byte index into SCSI response buffer - * @val: value to set - * - * To be used by SCSI command simulator functions. This macros - * expects two local variables, u8 *rbuf and unsigned int buflen, - * are in scope. - * - * LOCKING: - * None. - */ -#define ATA_SCSI_RBUF_SET(idx, val) do { \ - if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \ - } while (0) - -/** * ata_scsiop_inq_std - Simulate INQUIRY command * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Returns standard device identification data associated * with non-VPD INQUIRY command output. 
@@ -1747,10 +1965,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args, * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) { + const u8 versions[] = { + 0x60, /* SAM-3 (no version claimed) */ + + 0x03, + 0x20, /* SBC-2 (no version claimed) */ + + 0x02, + 0x60 /* SPC-3 (no version claimed) */ + }; u8 hdr[] = { TYPE_DISK, 0, @@ -1759,35 +1984,25 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, 95 - 4 }; + VPRINTK("ENTER\n"); + /* set scsi removeable (RMB) bit per ata bit */ if (ata_id_removeable(args->id)) hdr[1] |= (1 << 7); - VPRINTK("ENTER\n"); - memcpy(rbuf, hdr, sizeof(hdr)); + memcpy(&rbuf[8], "ATA ", 8); + ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); - if (buflen > 35) { - memcpy(&rbuf[8], "ATA ", 8); - ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); + /* From SAT, use last 2 words from fw rev unless they are spaces */ + ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV + 2, 4); + if (strncmp(&rbuf[32], " ", 4) == 0) ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4); - if (rbuf[32] == 0 || rbuf[32] == ' ') - memcpy(&rbuf[32], "n/a ", 4); - } - - if (buflen > 63) { - const u8 versions[] = { - 0x60, /* SAM-3 (no version claimed) */ - 0x03, - 0x20, /* SBC-2 (no version claimed) */ + if (rbuf[32] == 0 || rbuf[32] == ' ') + memcpy(&rbuf[32], "n/a ", 4); - 0x02, - 0x60 /* SPC-3 (no version claimed) */ - }; - - memcpy(rbuf + 59, versions, sizeof(versions)); - } + memcpy(rbuf + 59, versions, sizeof(versions)); return 0; } @@ -1796,27 +2011,26 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Returns list of inquiry VPD pages available. * * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf) { const u8 pages[] = { 0x00, /* page 0x00, this page */ 0x80, /* page 0x80, unit serial no page */ - 0x83 /* page 0x83, device ident page */ + 0x83, /* page 0x83, device ident page */ + 0x89, /* page 0x89, ata info page */ + 0xb0, /* page 0xb0, block limits page */ + 0xb1, /* page 0xb1, block device characteristics page */ + 0xb2, /* page 0xb2, thin provisioning page */ }; - rbuf[3] = sizeof(pages); /* number of supported VPD pages */ - - if (buflen > 6) - memcpy(rbuf + 4, pages, sizeof(pages)); + rbuf[3] = sizeof(pages); /* number of supported VPD pages */ + memcpy(rbuf + 4, pages, sizeof(pages)); return 0; } @@ -1824,16 +2038,13 @@ unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Returns ATA device serial number. 
* * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf) { const u8 hdr[] = { 0, @@ -1841,12 +2052,10 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, 0, ATA_ID_SERNO_LEN, /* page len */ }; - memcpy(rbuf, hdr, sizeof(hdr)); - - if (buflen > (ATA_ID_SERNO_LEN + 4 - 1)) - ata_id_string(args->id, (unsigned char *) &rbuf[4], - ATA_ID_SERNO, ATA_ID_SERNO_LEN); + memcpy(rbuf, hdr, sizeof(hdr)); + ata_id_string(args->id, (unsigned char *) &rbuf[4], + ATA_ID_SERNO, ATA_ID_SERNO_LEN); return 0; } @@ -1854,7 +2063,6 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Yields two logical unit device identification designators: * - vendor specific ASCII containing the ATA serial number @@ -1864,40 +2072,47 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf) { - int num; const int sat_model_serial_desc_len = 68; + int num; rbuf[1] = 0x83; /* this page code */ num = 4; - if (buflen > (ATA_ID_SERNO_LEN + num + 3)) { - /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ - rbuf[num + 0] = 2; - rbuf[num + 3] = ATA_ID_SERNO_LEN; - num += 4; - ata_id_string(args->id, (unsigned char *) rbuf + num, - ATA_ID_SERNO, ATA_ID_SERNO_LEN); - num += ATA_ID_SERNO_LEN; - } - if (buflen > (sat_model_serial_desc_len + num + 3)) { - /* SAT defined lu model and serial numbers descriptor */ - /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ - rbuf[num + 0] = 2; - rbuf[num + 1] = 1; - rbuf[num + 3] = sat_model_serial_desc_len; + /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */ + rbuf[num + 0] = 2; + rbuf[num + 3] = ATA_ID_SERNO_LEN; + num += 4; + ata_id_string(args->id, (unsigned char *) rbuf + num, + ATA_ID_SERNO, ATA_ID_SERNO_LEN); + num += ATA_ID_SERNO_LEN; + + /* SAT defined lu model and serial numbers descriptor */ + /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */ + rbuf[num + 0] = 2; + rbuf[num + 1] = 1; + rbuf[num + 3] = sat_model_serial_desc_len; + num += 4; + memcpy(rbuf + num, "ATA ", 8); + num += 8; + ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD, + ATA_ID_PROD_LEN); + num += ATA_ID_PROD_LEN; + ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO, + ATA_ID_SERNO_LEN); + num += ATA_ID_SERNO_LEN; + + if (ata_id_has_wwn(args->id)) { + /* SAT defined lu world wide name */ + /* piv=0, assoc=lu, code_set=binary, designator=NAA */ + rbuf[num + 0] = 1; + rbuf[num + 1] = 3; + rbuf[num + 3] = ATA_ID_WWN_LEN; num += 4; - memcpy(rbuf + num, "ATA ", 8); - num += 8; ata_id_string(args->id, (unsigned char *) rbuf + num, - ATA_ID_PROD, ATA_ID_PROD_LEN); - num += ATA_ID_PROD_LEN; - ata_id_string(args->id, (unsigned char *) rbuf + num, - ATA_ID_SERNO, ATA_ID_SERNO_LEN); - num += ATA_ID_SERNO_LEN; + ATA_ID_WWN, ATA_ID_WWN_LEN); + num += ATA_ID_WWN_LEN; } rbuf[3] = num - 4; /* page len (assume less than 256 bytes) */ return 0; @@ -1907,35 +2122,25 @@ unsigned int ata_scsiop_inq_83(struct 
ata_scsi_args *args, u8 *rbuf, * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Yields SAT-specified ATA VPD page. * * LOCKING: * spin_lock_irqsave(host lock) */ - -static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) { - u8 pbuf[60]; struct ata_taskfile tf; - unsigned int i; - if (!buflen) - return 0; - - memset(&pbuf, 0, sizeof(pbuf)); memset(&tf, 0, sizeof(tf)); - pbuf[1] = 0x89; /* our page code */ - pbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ - pbuf[3] = (0x238 & 0xff); + rbuf[1] = 0x89; /* our page code */ + rbuf[2] = (0x238 >> 8); /* page size fixed at 238h */ + rbuf[3] = (0x238 & 0xff); - memcpy(&pbuf[8], "linux ", 8); - memcpy(&pbuf[16], "libata ", 16); - memcpy(&pbuf[32], DRV_VERSION, 4); - ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4); + memcpy(&rbuf[8], "linux ", 8); + memcpy(&rbuf[16], "libata ", 16); + memcpy(&rbuf[32], DRV_VERSION, 4); /* we don't store the ATA device signature, so we fake it */ @@ -1943,19 +2148,70 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, tf.lbal = 0x1; tf.nsect = 0x1; - ata_tf_to_fis(&tf, 0, 1, &pbuf[36]); /* TODO: PMP? */ - pbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ + ata_tf_to_fis(&tf, 0, 1, &rbuf[36]); /* TODO: PMP? */ + rbuf[36] = 0x34; /* force D2H Reg FIS (34h) */ - pbuf[56] = ATA_CMD_ID_ATA; + rbuf[56] = ATA_CMD_ID_ATA; - i = min(buflen, 60U); - memcpy(rbuf, &pbuf[0], i); - buflen -= i; + memcpy(&rbuf[60], &args->id[0], 512); + return 0; +} - if (!buflen) - return 0; +static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) +{ + u16 min_io_sectors; + + rbuf[1] = 0xb0; + rbuf[3] = 0x3c; /* required VPD size with unmap support */ + + /* + * Optimal transfer length granularity. + * + * This is always one physical block, but for disks with a smaller + * logical than physical sector size we need to figure out what the + * latter is. + */ + min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); + put_unaligned_be16(min_io_sectors, &rbuf[6]); + + /* + * Optimal unmap granularity. + * + * The ATA spec doesn't even know about a granularity or alignment + * for the TRIM command. We can leave away most of the unmap related + * VPD page entries, but we have specifify a granularity to signal + * that we support some form of unmap - in thise case via WRITE SAME + * with the unmap bit set. 
+ */ + if (ata_id_has_trim(args->id)) { + put_unaligned_be64(65535 * 512 / 8, &rbuf[36]); + put_unaligned_be32(1, &rbuf[28]); + } + + return 0; +} + +static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf) +{ + int form_factor = ata_id_form_factor(args->id); + int media_rotation_rate = ata_id_rotation_rate(args->id); + + rbuf[1] = 0xb1; + rbuf[3] = 0x3c; + rbuf[4] = media_rotation_rate >> 8; + rbuf[5] = media_rotation_rate; + rbuf[7] = form_factor; + + return 0; +} + +static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf) +{ + /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ + rbuf[1] = 0xb2; + rbuf[3] = 0x4; + rbuf[5] = 1 << 6; /* TPWS */ - memcpy(&rbuf[60], &args->id[0], min(buflen, 512U)); return 0; } @@ -1963,7 +2219,6 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_noop - Command handler that simply returns success. * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * No operation. Simply returns success to caller, to indicate * that the caller should successfully complete this SCSI command. @@ -1971,47 +2226,40 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf, * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf) { VPRINTK("ENTER\n"); return 0; } /** - * ata_msense_push - Push data onto MODE SENSE data output buffer - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer - * @buf: Pointer to BLOB being added to output buffer - * @buflen: Length of BLOB + * modecpy - Prepare response for MODE SENSE + * @dest: output buffer + * @src: data being copied + * @n: length of mode page + * @changeable: whether changeable parameters are requested * - * Store MODE SENSE data on an output buffer. + * Generate a generic MODE SENSE page for either current or changeable + * parameters. * * LOCKING: * None. */ - -static void ata_msense_push(u8 **ptr_io, const u8 *last, - const u8 *buf, unsigned int buflen) +static void modecpy(u8 *dest, const u8 *src, int n, bool changeable) { - u8 *ptr = *ptr_io; - - if ((ptr + buflen - 1) > last) - return; - - memcpy(ptr, buf, buflen); - - ptr += buflen; - - *ptr_io = ptr; + if (changeable) { + memcpy(dest, src, 2); + memset(dest + 2, 0, n - 2); + } else { + memcpy(dest, src, n); + } } /** * ata_msense_caching - Simulate MODE SENSE caching info page * @id: device IDENTIFY data - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer + * @buf: output buffer + * @changeable: whether changeable parameters are requested * * Generate a caching info page, which conditionally indicates * write caching to the SCSI layer, depending on device @@ -2020,58 +2268,46 @@ static void ata_msense_push(u8 **ptr_io, const u8 *last, * LOCKING: * None. 
*/ - -static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io, - const u8 *last) +static unsigned int ata_msense_caching(u16 *id, u8 *buf, bool changeable) { - u8 page[CACHE_MPAGE_LEN]; - - memcpy(page, def_cache_mpage, sizeof(page)); - if (ata_id_wcache_enabled(id)) - page[2] |= (1 << 2); /* write cache enable */ - if (!ata_id_rahead_enabled(id)) - page[12] |= (1 << 5); /* disable read ahead */ - - ata_msense_push(ptr_io, last, page, sizeof(page)); - return sizeof(page); + modecpy(buf, def_cache_mpage, sizeof(def_cache_mpage), changeable); + if (changeable || ata_id_wcache_enabled(id)) + buf[2] |= (1 << 2); /* write cache enable */ + if (!changeable && !ata_id_rahead_enabled(id)) + buf[12] |= (1 << 5); /* disable read ahead */ + return sizeof(def_cache_mpage); } /** * ata_msense_ctl_mode - Simulate MODE SENSE control mode page - * @dev: Device associated with this MODE SENSE command - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer + * @buf: output buffer + * @changeable: whether changeable parameters are requested * * Generate a generic MODE SENSE control mode page. * * LOCKING: * None. */ - -static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) +static unsigned int ata_msense_ctl_mode(u8 *buf, bool changeable) { - ata_msense_push(ptr_io, last, def_control_mpage, - sizeof(def_control_mpage)); + modecpy(buf, def_control_mpage, sizeof(def_control_mpage), changeable); return sizeof(def_control_mpage); } /** * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page - * @dev: Device associated with this MODE SENSE command - * @ptr_io: (input/output) Location to store more output data - * @last: End of output data buffer + * @buf: output buffer + * @changeable: whether changeable parameters are requested * * Generate a generic MODE SENSE r/w error recovery page. * * LOCKING: * None. */ - -static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last) +static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable) { - - ata_msense_push(ptr_io, last, def_rw_recovery_mpage, - sizeof(def_rw_recovery_mpage)); + modecpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage), + changeable); return sizeof(def_rw_recovery_mpage); } @@ -2103,7 +2339,6 @@ static int ata_dev_supports_fua(u16 *id) * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Simulate MODE SENSE commands. Assume this is invoked for direct * access devices (e.g. disks) only. 
There should be no block @@ -2112,19 +2347,17 @@ static int ata_dev_supports_fua(u16 *id) * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf) { struct ata_device *dev = args->dev; - u8 *scsicmd = args->cmd->cmnd, *p, *last; + u8 *scsicmd = args->cmd->cmnd, *p = rbuf; const u8 sat_blk_desc[] = { 0, 0, 0, 0, /* number of blocks: sat unspecified */ 0, 0, 0x2, 0x0 /* block length: 512 bytes */ }; u8 pg, spg; - unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; + unsigned int ebd, page_control, six_byte; u8 dpofua; VPRINTK("ENTER\n"); @@ -2138,26 +2371,19 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, page_control = scsicmd[2] >> 6; switch (page_control) { case 0: /* current */ + case 1: /* changeable */ + case 2: /* defaults */ break; /* supported */ case 3: /* saved */ goto saving_not_supp; - case 1: /* changeable */ - case 2: /* defaults */ default: goto invalid_fld; } - if (six_byte) { - output_len = 4 + (ebd ? 8 : 0); - alloc_len = scsicmd[4]; - } else { - output_len = 8 + (ebd ? 8 : 0); - alloc_len = (scsicmd[7] << 8) + scsicmd[8]; - } - minlen = (alloc_len < buflen) ? alloc_len : buflen; - - p = rbuf + output_len; - last = rbuf + minlen - 1; + if (six_byte) + p += 4 + (ebd ? 8 : 0); + else + p += 8 + (ebd ? 8 : 0); pg = scsicmd[2] & 0x3f; spg = scsicmd[3]; @@ -2170,61 +2396,48 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, switch(pg) { case RW_RECOVERY_MPAGE: - output_len += ata_msense_rw_recovery(&p, last); + p += ata_msense_rw_recovery(p, page_control == 1); break; case CACHE_MPAGE: - output_len += ata_msense_caching(args->id, &p, last); + p += ata_msense_caching(args->id, p, page_control == 1); break; - case CONTROL_MPAGE: { - output_len += ata_msense_ctl_mode(&p, last); + case CONTROL_MPAGE: + p += ata_msense_ctl_mode(p, page_control == 1); break; - } case ALL_MPAGES: - output_len += ata_msense_rw_recovery(&p, last); - output_len += ata_msense_caching(args->id, &p, last); - output_len += ata_msense_ctl_mode(&p, last); + p += ata_msense_rw_recovery(p, page_control == 1); + p += ata_msense_caching(args->id, p, page_control == 1); + p += ata_msense_ctl_mode(p, page_control == 1); break; default: /* invalid page code */ goto invalid_fld; } - if (minlen < 1) - return 0; - dpofua = 0; if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) && (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) dpofua = 1 << 4; if (six_byte) { - output_len--; - rbuf[0] = output_len; - if (minlen > 2) - rbuf[2] |= dpofua; + rbuf[0] = p - rbuf - 1; + rbuf[2] |= dpofua; if (ebd) { - if (minlen > 3) - rbuf[3] = sizeof(sat_blk_desc); - if (minlen > 11) - memcpy(rbuf + 4, sat_blk_desc, - sizeof(sat_blk_desc)); + rbuf[3] = sizeof(sat_blk_desc); + memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc)); } } else { - output_len -= 2; + unsigned int output_len = p - rbuf - 2; + rbuf[0] = output_len >> 8; - if (minlen > 1) - rbuf[1] = output_len; - if (minlen > 3) - rbuf[3] |= dpofua; + rbuf[1] = output_len; + rbuf[3] |= dpofua; if (ebd) { - if (minlen > 7) - rbuf[7] = sizeof(sat_blk_desc); - if (minlen > 15) - memcpy(rbuf + 8, sat_blk_desc, - sizeof(sat_blk_desc)); + rbuf[7] = sizeof(sat_blk_desc); + memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc)); } } return 0; @@ -2244,17 +2457,23 @@ saving_not_supp: * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] 
commands * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Simulate READ CAPACITY commands. * * LOCKING: * None. */ -unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) { - u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ + struct ata_device *dev = args->dev; + u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ + u32 sector_size; /* physical sector size in bytes */ + u8 log2_per_phys; + u16 lowest_aligned; + + sector_size = ata_id_logical_sector_size(dev->id); + log2_per_phys = ata_id_log2_per_physical_sector(dev->id); + lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); VPRINTK("ENTER\n"); @@ -2263,28 +2482,44 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, last_lba = 0xffffffff; /* sector count, 32-bit */ - ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3)); - ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2)); - ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1)); - ATA_SCSI_RBUF_SET(3, last_lba); + rbuf[0] = last_lba >> (8 * 3); + rbuf[1] = last_lba >> (8 * 2); + rbuf[2] = last_lba >> (8 * 1); + rbuf[3] = last_lba; /* sector size */ - ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8); - ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE & 0xff); + rbuf[4] = sector_size >> (8 * 3); + rbuf[5] = sector_size >> (8 * 2); + rbuf[6] = sector_size >> (8 * 1); + rbuf[7] = sector_size; } else { /* sector count, 64-bit */ - ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7)); - ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6)); - ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5)); - ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4)); - ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3)); - ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2)); - ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1)); - ATA_SCSI_RBUF_SET(7, last_lba); + rbuf[0] = last_lba >> (8 * 7); + rbuf[1] = last_lba >> (8 * 6); + rbuf[2] = last_lba >> (8 * 5); + rbuf[3] = last_lba >> (8 * 4); + rbuf[4] = last_lba >> (8 * 3); + rbuf[5] = last_lba >> (8 * 2); + rbuf[6] = last_lba >> (8 * 1); + rbuf[7] = last_lba; /* sector size */ - ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8); - ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE & 0xff); + rbuf[ 8] = sector_size >> (8 * 3); + rbuf[ 9] = sector_size >> (8 * 2); + rbuf[10] = sector_size >> (8 * 1); + rbuf[11] = sector_size; + + rbuf[12] = 0; + rbuf[13] = log2_per_phys; + rbuf[14] = (lowest_aligned >> 8) & 0x3f; + rbuf[15] = lowest_aligned; + + if (ata_id_has_trim(args->id)) { + rbuf[14] |= 0x80; /* TPE */ + + if (ata_id_has_zero_after_trim(args->id)) + rbuf[14] |= 0x40; /* TPRZ */ + } } return 0; @@ -2294,16 +2529,13 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, * ata_scsiop_report_luns - Simulate REPORT LUNS command * @args: device IDENTIFY data / SCSI command of interest. * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. - * @buflen: Response buffer length. * * Simulate REPORT LUNS command. 
* * LOCKING: * spin_lock_irqsave(host lock) */ - -unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, - unsigned int buflen) +static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf) { VPRINTK("ENTER\n"); rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ @@ -2311,57 +2543,6 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, return 0; } -/** - * ata_scsi_set_sense - Set SCSI sense data and status - * @cmd: SCSI request to be handled - * @sk: SCSI-defined sense key - * @asc: SCSI-defined additional sense code - * @ascq: SCSI-defined additional sense code qualifier - * - * Helper function that builds a valid fixed format, current - * response code and the given sense key (sk), additional sense - * code (asc) and additional sense code qualifier (ascq) with - * a SCSI command status of %SAM_STAT_CHECK_CONDITION and - * DRIVER_SENSE set in the upper bits of scsi_cmnd::result . - * - * LOCKING: - * Not required - */ - -void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) -{ - cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; - - cmd->sense_buffer[0] = 0x70; /* fixed format, current */ - cmd->sense_buffer[2] = sk; - cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ - cmd->sense_buffer[12] = asc; - cmd->sense_buffer[13] = ascq; -} - -/** - * ata_scsi_badcmd - End a SCSI request with an error - * @cmd: SCSI request to be handled - * @done: SCSI command completion function - * @asc: SCSI-defined additional sense code - * @ascq: SCSI-defined additional sense code qualifier - * - * Helper function that completes a SCSI command with - * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST - * and the specified additional sense codes. - * - * LOCKING: - * spin_lock_irqsave(host lock) - */ - -void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) -{ - DPRINTK("ENTER\n"); - ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq); - - done(cmd); -} - static void atapi_sense_complete(struct ata_queued_cmd *qc) { if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { @@ -2393,7 +2574,10 @@ static void atapi_request_sense(struct ata_queued_cmd *qc) /* FIXME: is this needed? */ memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); - ap->ops->tf_read(ap, &qc->tf); +#ifdef CONFIG_ATA_SFF + if (ap->ops->sff_tf_read) + ap->ops->sff_tf_read(ap, &qc->tf); +#endif /* fill these in, for the case where they are -not- overwritten */ cmd->sense_buffer[0] = 0x70; @@ -2459,8 +2643,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) * * If door lock fails, always clear sdev->locked to * avoid this infinite loop. + * + * This may happen before SCSI scan is complete. Make + * sure qc->dev->sdev isn't NULL before dereferencing. 
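
The kerneldoc removed here documents ata_scsi_set_sense(), which the rest of this file still calls; atapi_request_sense() below likewise pre-fills the same fixed-format sense layout (response code 0x70, sense key at byte 2, ASC/ASCQ at bytes 12/13). A standalone sketch of that 18-byte layout; build_fixed_sense() is a made-up name, not a kernel symbol:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Fixed-format sense data as built by ata_scsi_set_sense(): byte 0 = 0x70
 * (current error, fixed format), byte 2 = sense key, byte 7 = additional
 * sense length (18 - 8), bytes 12/13 = ASC/ASCQ. */
static void build_fixed_sense(uint8_t *sb, uint8_t sk, uint8_t asc, uint8_t ascq)
{
	memset(sb, 0, 18);
	sb[0] = 0x70;
	sb[2] = sk;
	sb[7] = 18 - 8;
	sb[12] = asc;
	sb[13] = ascq;
}

int main(void)
{
	uint8_t sense[18];

	/* ILLEGAL REQUEST / "Invalid field in CDB" (0x05 / 0x24 / 0x00),
	 * the same triple the invalid_fld paths below report. */
	build_fixed_sense(sense, 0x05, 0x24, 0x00);
	printf("%02x %02x %02x %02x\n", sense[0], sense[2], sense[12], sense[13]);
	return 0;
}
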
*/ - if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) + if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) qc->dev->sdev->locked = 0; qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; @@ -2485,13 +2672,10 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) u8 *scsicmd = cmd->cmnd; if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { - u8 *buf = NULL; - unsigned int buflen; unsigned long flags; + u8 *buf; - local_irq_save(flags); - - buflen = ata_scsi_rbuf_get(cmd, &buf); + buf = ata_scsi_rbuf_get(cmd, true, &flags); /* ATAPI devices typically report zero for their SCSI version, * and sometimes deviate from the spec WRT response data @@ -2506,9 +2690,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) buf[3] = 0x32; } - ata_scsi_rbuf_put(cmd, buf); - - local_irq_restore(flags); + ata_scsi_rbuf_put(cmd, true, &flags); } cmd->result = SAM_STAT_GOOD; @@ -2531,8 +2713,8 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; struct ata_device *dev = qc->dev; - int using_pio = (dev->flags & ATA_DFLAG_PIO); int nodata = (scmd->sc_data_direction == DMA_NONE); + int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO); unsigned int nbytes; memset(qc->cdb, 0, dev->cdb_len); @@ -2550,7 +2732,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) ata_qc_set_pc_nbytes(qc); /* check whether ATAPI DMA is safe */ - if (!using_pio && ata_check_atapi_dma(qc)) + if (!nodata && !using_pio && atapi_check_dma(qc)) using_pio = 1; /* Some controller variants snoop this value for Packet @@ -2590,13 +2772,11 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) qc->tf.lbam = (nbytes & 0xFF); qc->tf.lbah = (nbytes >> 8); - if (using_pio || nodata) { - /* no data, or PIO data xfer */ - if (nodata) - qc->tf.protocol = ATAPI_PROT_NODATA; - else - qc->tf.protocol = ATAPI_PROT_PIO; - } else { + if (nodata) + qc->tf.protocol = ATAPI_PROT_NODATA; + else if (using_pio) + qc->tf.protocol = ATAPI_PROT_PIO; + else { /* DMA data xfer */ qc->tf.protocol = ATAPI_PROT_DMA; qc->tf.feature |= ATAPI_PKT_DMA; @@ -2615,7 +2795,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc) static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) { - if (ap->nr_pmp_links == 0) { + if (!sata_pmp_attached(ap)) { if (likely(devno < ata_link_max_devices(&ap->link))) return &ap->link.device[devno]; } else { @@ -2632,7 +2812,7 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, int devno; /* skip commands not addressed to targets we simulate */ - if (ap->nr_pmp_links == 0) { + if (!sata_pmp_attached(ap)) { if (unlikely(scsidev->channel || scsidev->lun)) return NULL; devno = scsidev->id; @@ -2646,36 +2826,6 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, } /** - * ata_scsi_dev_enabled - determine if device is enabled - * @dev: ATA device - * - * Determine if commands should be sent to the specified device. - * - * LOCKING: - * spin_lock_irqsave(host lock) - * - * RETURNS: - * 0 if commands are not allowed / 1 if commands are allowed - */ - -static int ata_scsi_dev_enabled(struct ata_device *dev) -{ - if (unlikely(!ata_dev_enabled(dev))) - return 0; - - if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) { - if (unlikely(dev->class == ATA_DEV_ATAPI)) { - ata_dev_printk(dev, KERN_WARNING, - "WARNING: ATAPI is %s, device ignored.\n", - atapi_enabled ? 
"not supported with this driver" : "disabled"); - return 0; - } - } - - return 1; -} - -/** * ata_scsi_find_dev - lookup ata_device from scsi_cmnd * @ap: ATA port to which the device is attached * @scsidev: SCSI device from which we derive the ATA device @@ -2696,7 +2846,7 @@ ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) { struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); - if (unlikely(!dev || !ata_scsi_dev_enabled(dev))) + if (unlikely(!dev || !ata_dev_enabled(dev))) return NULL; return dev; @@ -2759,28 +2909,6 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) goto invalid_fld; /* - * Filter TPM commands by default. These provide an - * essentially uncontrolled encrypted "back door" between - * applications and the disk. Set libata.allow_tpm=1 if you - * have a real reason for wanting to use them. This ensures - * that installed software cannot easily mess stuff up without - * user intent. DVR type users will probably ship with this enabled - * for movie content management. - * - * Note that for ATA8 we can issue a DCS change and DCS freeze lock - * for this and should do in future but that it is not sufficient as - * DCS is an optional feature set. Thus we also do the software filter - * so that we comply with the TC consortium stated goal that the user - * can turn off TC features of their system. - */ - if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) - goto invalid_fld; - - /* We may not issue DMA commands if no DMA mode is set */ - if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) - goto invalid_fld; - - /* * 12 and 16 byte CDBs use different offsets to * provide the various register values. */ @@ -2829,6 +2957,79 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) tf->device = dev->devno ? tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; + switch (tf->command) { + /* READ/WRITE LONG use a non-standard sect_size */ + case ATA_CMD_READ_LONG: + case ATA_CMD_READ_LONG_ONCE: + case ATA_CMD_WRITE_LONG: + case ATA_CMD_WRITE_LONG_ONCE: + if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) + goto invalid_fld; + qc->sect_size = scsi_bufflen(scmd); + break; + + /* commands using reported Logical Block size (e.g. 512 or 4K) */ + case ATA_CMD_CFA_WRITE_NE: + case ATA_CMD_CFA_TRANS_SECT: + case ATA_CMD_CFA_WRITE_MULT_NE: + /* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */ + case ATA_CMD_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ_QUEUED: + /* XXX: case ATA_CMD_READ_QUEUED_EXT: */ + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_MULTI: + case ATA_CMD_READ_MULTI_EXT: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_READ_STREAM_DMA_EXT: + case ATA_CMD_READ_STREAM_EXT: + case ATA_CMD_VERIFY: + case ATA_CMD_VERIFY_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_FUA_EXT: + case ATA_CMD_WRITE_QUEUED: + case ATA_CMD_WRITE_QUEUED_FUA_EXT: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_MULTI: + case ATA_CMD_WRITE_MULTI_EXT: + case ATA_CMD_WRITE_MULTI_FUA_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + case ATA_CMD_WRITE_STREAM_DMA_EXT: + case ATA_CMD_WRITE_STREAM_EXT: + qc->sect_size = scmd->device->sector_size; + break; + + /* Everything else uses 512 byte "sectors" */ + default: + qc->sect_size = ATA_SECT_SIZE; + } + + /* + * Set flags so that all registers will be written, pass on + * write indication (used for PIO/DMA setup), result TF is + * copied back and we don't whine too much about its failure. 
+ */ + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + if (scmd->sc_data_direction == DMA_TO_DEVICE) + tf->flags |= ATA_TFLAG_WRITE; + + qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; + + /* + * Set transfer length. + * + * TODO: find out if we need to do more here to + * cover scatter/gather case. + */ + ata_qc_set_pc_nbytes(qc); + + /* We may not issue DMA commands if no DMA mode is set */ + if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) + goto invalid_fld; + /* sanity check for pio multi commands */ if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) goto invalid_fld; @@ -2840,21 +3041,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * with the cached multi_count of libata */ if (multi_count != dev->multi_count) - ata_dev_printk(dev, KERN_WARNING, - "invalid multi_count %u ignored\n", - multi_count); - } - - /* READ/WRITE LONG use a non-standard sect_size */ - qc->sect_size = ATA_SECT_SIZE; - switch (tf->command) { - case ATA_CMD_READ_LONG: - case ATA_CMD_READ_LONG_ONCE: - case ATA_CMD_WRITE_LONG: - case ATA_CMD_WRITE_LONG_ONCE: - if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) - goto invalid_fld; - qc->sect_size = scsi_bufflen(scmd); + ata_dev_warn(dev, "invalid multi_count %u ignored\n", + multi_count); } /* @@ -2864,30 +3052,92 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) * controller (i.e. the reason for ->set_piomode(), * ->set_dmamode(), and ->post_set_mode() hooks). */ - if ((tf->command == ATA_CMD_SET_FEATURES) - && (tf->feature == SETFEATURES_XFER)) + if (tf->command == ATA_CMD_SET_FEATURES && + tf->feature == SETFEATURES_XFER) goto invalid_fld; /* - * Set flags so that all registers will be written, - * and pass on write indication (used for PIO/DMA - * setup.) + * Filter TPM commands by default. These provide an + * essentially uncontrolled encrypted "back door" between + * applications and the disk. Set libata.allow_tpm=1 if you + * have a real reason for wanting to use them. This ensures + * that installed software cannot easily mess stuff up without + * user intent. DVR type users will probably ship with this enabled + * for movie content management. + * + * Note that for ATA8 we can issue a DCS change and DCS freeze lock + * for this and should do in future but that it is not sufficient as + * DCS is an optional feature set. Thus we also do the software filter + * so that we comply with the TC consortium stated goal that the user + * can turn off TC features of their system. */ - tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE); + if (tf->command >= 0x5C && tf->command <= 0x5F && !libata_allow_tpm) + goto invalid_fld; - if (scmd->sc_data_direction == DMA_TO_DEVICE) - tf->flags |= ATA_TFLAG_WRITE; + return 0; + + invalid_fld: + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); + /* "Invalid field in cdb" */ + return 1; +} + +static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc) +{ + struct ata_taskfile *tf = &qc->tf; + struct scsi_cmnd *scmd = qc->scsicmd; + struct ata_device *dev = qc->dev; + const u8 *cdb = scmd->cmnd; + u64 block; + u32 n_block; + u32 size; + void *buf; + + /* we may not issue DMA commands if no DMA mode is set */ + if (unlikely(!dev->dma_mode)) + goto invalid_fld; + + if (unlikely(scmd->cmd_len < 16)) + goto invalid_fld; + scsi_16_lba_len(cdb, &block, &n_block); + + /* for now we only support WRITE SAME with the unmap bit set */ + if (unlikely(!(cdb[1] & 0x8))) + goto invalid_fld; /* - * Set transfer length. 
- * - * TODO: find out if we need to do more here to - * cover scatter/gather case. + * WRITE SAME always has a sector sized buffer as payload, this + * should never be a multiple entry S/G list. */ - ata_qc_set_pc_nbytes(qc); + if (!scsi_sg_count(scmd)) + goto invalid_fld; - /* request result TF and be quiet about device error */ - qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET; + buf = page_address(sg_page(scsi_sglist(scmd))); + size = ata_set_lba_range_entries(buf, 512, block, n_block); + + if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) { + /* Newer devices support queued TRIM commands */ + tf->protocol = ATA_PROT_NCQ; + tf->command = ATA_CMD_FPDMA_SEND; + tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f; + tf->nsect = qc->tag << 3; + tf->hob_feature = (size / 512) >> 8; + tf->feature = size / 512; + + tf->auxiliary = 1; + } else { + tf->protocol = ATA_PROT_DMA; + tf->hob_feature = 0; + tf->feature = ATA_DSM_TRIM; + tf->hob_nsect = (size / 512) >> 8; + tf->nsect = size / 512; + tf->command = ATA_CMD_DSM; + } + + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | + ATA_TFLAG_WRITE; + + ata_qc_set_pc_nbytes(qc); return 0; @@ -2898,6 +3148,188 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) } /** + * ata_mselect_caching - Simulate MODE SELECT for caching info page + * @qc: Storage for translated ATA taskfile + * @buf: input buffer + * @len: number of valid bytes in the input buffer + * + * Prepare a taskfile to modify caching information for the device. + * + * LOCKING: + * None. + */ +static int ata_mselect_caching(struct ata_queued_cmd *qc, + const u8 *buf, int len) +{ + struct ata_taskfile *tf = &qc->tf; + struct ata_device *dev = qc->dev; + char mpage[CACHE_MPAGE_LEN]; + u8 wce; + + /* + * The first two bytes of def_cache_mpage are a header, so offsets + * in mpage are off by 2 compared to buf. Same for len. + */ + + if (len != CACHE_MPAGE_LEN - 2) + return -EINVAL; + + wce = buf[0] & (1 << 2); + + /* + * Check that read-only bits are not modified. + */ + ata_msense_caching(dev->id, mpage, false); + mpage[2] &= ~(1 << 2); + mpage[2] |= wce; + if (memcmp(mpage + 2, buf, CACHE_MPAGE_LEN - 2) != 0) + return -EINVAL; + + tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; + tf->protocol = ATA_PROT_NODATA; + tf->nsect = 0; + tf->command = ATA_CMD_SET_FEATURES; + tf->feature = wce ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF; + return 0; +} + +/** + * ata_scsiop_mode_select - Simulate MODE SELECT 6, 10 commands + * @qc: Storage for translated ATA taskfile + * + * Converts a MODE SELECT command to an ATA SET FEATURES taskfile. + * Assume this is invoked for direct access devices (e.g. disks) only. + * There should be no block descriptor for other device types. + * + * LOCKING: + * spin_lock_irqsave(host lock) + */ +static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *scmd = qc->scsicmd; + const u8 *cdb = scmd->cmnd; + const u8 *p; + u8 pg, spg; + unsigned six_byte, pg_len, hdr_len, bd_len; + int len; + + VPRINTK("ENTER\n"); + + six_byte = (cdb[0] == MODE_SELECT); + if (six_byte) { + if (scmd->cmd_len < 5) + goto invalid_fld; + + len = cdb[4]; + hdr_len = 4; + } else { + if (scmd->cmd_len < 9) + goto invalid_fld; + + len = (cdb[7] << 8) + cdb[8]; + hdr_len = 8; + } + + /* We only support PF=1, SP=0. */ + if ((cdb[1] & 0x11) != 0x10) + goto invalid_fld; + + /* Test early for possible overrun. 
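
ata_scsi_write_same_xlat() above turns WRITE SAME(16) with the unmap bit into DATA SET MANAGEMENT / TRIM (or FPDMA SEND when queued TRIM is available), after packing the LBA range into a single-sector payload via ata_set_lba_range_entries(). A sketch of that payload, assuming the usual DSM range-entry format of 8-byte little-endian entries carrying a 48-bit LBA and a 16-bit count; unlike the kernel helper, fill_trim_payload() neither zero-pads nor rounds the returned length up to a full 512-byte sector:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Build DSM TRIM range entries: each 8-byte little-endian entry packs a
 * 48-bit starting LBA in the low bits and a 16-bit range length in the
 * high bits; ranges longer than 0xffff sectors are split across entries. */
static unsigned int fill_trim_payload(uint8_t *buf, unsigned int buf_size,
				      uint64_t lba, uint64_t num)
{
	unsigned int used = 0;

	memset(buf, 0, buf_size);
	while (num && used + 8 <= buf_size) {
		uint64_t len = num > 0xffff ? 0xffff : num;
		uint64_t entry = (lba & 0xffffffffffffULL) | (len << 48);
		unsigned int i;

		for (i = 0; i < 8; i++)		/* little endian */
			buf[used + i] = entry >> (8 * i);
		lba += len;
		num -= len;
		used += 8;
	}
	return used;	/* raw entry bytes; the caller issues used/512 sectors */
}

int main(void)
{
	uint8_t payload[512];
	unsigned int n = fill_trim_payload(payload, sizeof(payload), 4096, 0x20000);

	printf("%u bytes of range entries\n", n);
	return 0;
}
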
*/ + if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len) + goto invalid_param_len; + + p = page_address(sg_page(scsi_sglist(scmd))); + + /* Move past header and block descriptors. */ + if (len < hdr_len) + goto invalid_param_len; + + if (six_byte) + bd_len = p[3]; + else + bd_len = (p[6] << 8) + p[7]; + + len -= hdr_len; + p += hdr_len; + if (len < bd_len) + goto invalid_param_len; + if (bd_len != 0 && bd_len != 8) + goto invalid_param; + + len -= bd_len; + p += bd_len; + if (len == 0) + goto skip; + + /* Parse both possible formats for the mode page headers. */ + pg = p[0] & 0x3f; + if (p[0] & 0x40) { + if (len < 4) + goto invalid_param_len; + + spg = p[1]; + pg_len = (p[2] << 8) | p[3]; + p += 4; + len -= 4; + } else { + if (len < 2) + goto invalid_param_len; + + spg = 0; + pg_len = p[1]; + p += 2; + len -= 2; + } + + /* + * No mode subpages supported (yet) but asking for _all_ + * subpages may be valid + */ + if (spg && (spg != ALL_SUB_MPAGES)) + goto invalid_param; + if (pg_len > len) + goto invalid_param_len; + + switch (pg) { + case CACHE_MPAGE: + if (ata_mselect_caching(qc, p, pg_len) < 0) + goto invalid_param; + break; + + default: /* invalid page code */ + goto invalid_param; + } + + /* + * Only one page has changeable data, so we only support setting one + * page at a time. + */ + if (len > pg_len) + goto invalid_param; + + return 0; + + invalid_fld: + /* "Invalid field in CDB" */ + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0); + return 1; + + invalid_param: + /* "Invalid field in parameter list" */ + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x26, 0x0); + return 1; + + invalid_param_len: + /* "Parameter list length error" */ + ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x1a, 0x0); + return 1; + + skip: + scmd->result = SAM_STAT_GOOD; + return 1; +} + +/** * ata_get_xlat_func - check if SCSI to ATA translation is possible * @dev: ATA device * @cmd: SCSI command opcode to consider @@ -2921,6 +3353,9 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) case WRITE_16: return ata_scsi_rw_xlat; + case WRITE_SAME_16: + return ata_scsi_write_same_xlat; + case SYNCHRONIZE_CACHE: if (ata_try_flush_cache(dev)) return ata_scsi_flush_xlat; @@ -2934,6 +3369,11 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) case ATA_16: return ata_scsi_pass_thru; + case MODE_SELECT: + case MODE_SELECT_10: + return ata_scsi_mode_select_xlat; + break; + case START_STOP: return ata_scsi_start_stop_xlat; } @@ -2966,7 +3406,6 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap, } static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, - void (*done)(struct scsi_cmnd *), struct ata_device *dev) { u8 scsi_op = scmd->cmnd[0]; @@ -3000,9 +3439,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, } if (xlat_func) - rc = ata_scsi_translate(dev, scmd, done, xlat_func); + rc = ata_scsi_translate(dev, scmd, xlat_func); else - ata_scsi_simulate(dev, scmd, done); + ata_scsi_simulate(dev, scmd); return rc; @@ -3010,14 +3449,14 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", scmd->cmd_len, scsi_op, dev->cdb_len); scmd->result = DID_ERROR << 16; - done(scmd); + scmd->scsi_done(scmd); return 0; } /** * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device + * @shost: SCSI host of command to be sent * @cmd: SCSI command to be sent - * @done: Completion function, called when command is complete * * In some cases, this function translates SCSI 
commands into * ATA taskfiles, and queues the taskfiles to be sent to @@ -3027,37 +3466,36 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, * ATA and ATAPI devices appearing as SCSI devices. * * LOCKING: - * Releases scsi-layer-held lock, and obtains host lock. + * ATA host lock * * RETURNS: * Return value from __ata_scsi_queuecmd() if @cmd can be queued, * 0 otherwise. */ -int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct ata_port *ap; struct ata_device *dev; struct scsi_device *scsidev = cmd->device; - struct Scsi_Host *shost = scsidev->host; int rc = 0; + unsigned long irq_flags; ap = ata_shost_to_port(shost); - spin_unlock(shost->host_lock); - spin_lock(ap->lock); + spin_lock_irqsave(ap->lock, irq_flags); ata_scsi_dump_cdb(ap, cmd); dev = ata_scsi_find_dev(ap, scsidev); if (likely(dev)) - rc = __ata_scsi_queuecmd(cmd, done, dev); + rc = __ata_scsi_queuecmd(cmd, dev); else { cmd->result = (DID_BAD_TARGET << 16); - done(cmd); + cmd->scsi_done(cmd); } - spin_unlock(ap->lock); - spin_lock(shost->host_lock); + spin_unlock_irqrestore(ap->lock, irq_flags); + return rc; } @@ -3065,7 +3503,6 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) * ata_scsi_simulate - simulate SCSI command on ATA device * @dev: the target device * @cmd: SCSI command being sent to device. - * @done: SCSI command completion function. * * Interprets and directly executes a select list of SCSI commands * that can be handled internally. @@ -3074,8 +3511,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) * spin_lock_irqsave(host lock) */ -void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)) +void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd) { struct ata_scsi_args args; const u8 *scsicmd = cmd->cmnd; @@ -3084,17 +3520,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, args.dev = dev; args.id = dev->id; args.cmd = cmd; - args.done = done; + args.done = cmd->scsi_done; switch(scsicmd[0]) { /* TODO: worth improving? */ case FORMAT_UNIT: - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; case INQUIRY: if (scsicmd[1] & 2) /* is CmdDt set? */ - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); else switch (scsicmd[2]) { @@ -3110,8 +3546,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, case 0x89: ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); break; + case 0xb0: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b0); + break; + case 0xb1: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1); + break; + case 0xb2: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2); + break; default: - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; } break; @@ -3121,11 +3566,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); break; - case MODE_SELECT: /* unconditionally return */ - case MODE_SELECT_10: /* bad-field-in-cdb */ - ata_scsi_invalid_field(cmd, done); - break; - case READ_CAPACITY: ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); break; @@ -3134,7 +3574,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); else - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; case REPORT_LUNS: @@ -3144,7 +3584,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, case REQUEST_SENSE: ata_scsi_set_sense(cmd, 0, 0, 0); cmd->result = (DRIVER_SENSE << 24); - done(cmd); + cmd->scsi_done(cmd); break; /* if we reach this, then writeback caching is disabled, @@ -3166,14 +3606,14 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) ata_scsi_rbuf_fill(&args, ata_scsiop_noop); else - ata_scsi_invalid_field(cmd, done); + ata_scsi_invalid_field(cmd); break; /* all other commands */ default: ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); /* "Invalid command operation code" */ - done(cmd); + cmd->scsi_done(cmd); break; } } @@ -3191,15 +3631,17 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) if (!shost) goto err_alloc; + shost->eh_noresume = 1; *(struct ata_port **)&shost->hostdata[0] = ap; ap->scsi_host = shost; - shost->transportt = &ata_scsi_transport_template; + shost->transportt = ata_scsi_transport_template; shost->unique_id = ap->print_id; shost->max_id = 16; shost->max_lun = 1; shost->max_channel = 1; shost->max_cmd_len = 16; + shost->no_write_same = 1; /* Schedule policy is determined by ->qc_defer() * callback and it needs to see every deferred qc. 
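
The simulate path now answers three more EVPD INQUIRY pages (0xb0, 0xb1, 0xb2) alongside 0x89. As a quick reference for what those page codes mean in SPC/SBC terms, a trivial lookup; the per-page handlers the hunk wires up are ata_scsiop_inq_b0/b1/b2, and the strings below are just labels, not kernel names:

#include <stdint.h>
#include <stdio.h>

/* Map the EVPD page codes dispatched by ata_scsi_simulate() to their
 * standard SCSI names. */
static const char *vpd_page_name(uint8_t page)
{
	switch (page) {
	case 0x00: return "Supported VPD pages";
	case 0x80: return "Unit serial number";
	case 0x83: return "Device identification";
	case 0x89: return "ATA information";
	case 0xb0: return "Block limits";
	case 0xb1: return "Block device characteristics";
	case 0xb2: return "Logical block provisioning";
	default:   return "unsupported (invalid field in CDB)";
	}
}

int main(void)
{
	uint8_t pages[] = { 0x00, 0x89, 0xb0, 0xb1, 0xb2, 0xc0 };
	unsigned int i;

	for (i = 0; i < sizeof(pages); i++)
		printf("0x%02x: %s\n", pages[i], vpd_page_name(pages[i]));
	return 0;
}
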
@@ -3208,7 +3650,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) */ shost->max_host_blocked = 1; - rc = scsi_add_host(ap->scsi_host, ap->host->dev); + rc = scsi_add_host_with_dma(ap->scsi_host, + &ap->tdev, ap->host->dev); if (rc) goto err_add; } @@ -3234,16 +3677,13 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) struct ata_link *link; struct ata_device *dev; - if (ap->flags & ATA_FLAG_DISABLED) - return; - repeat: - ata_port_for_each_link(link, ap) { - ata_link_for_each_dev(dev, link) { + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { struct scsi_device *sdev; int channel = 0, id = 0; - if (!ata_dev_enabled(dev) || dev->sdev) + if (dev->sdev) continue; if (ata_is_host_link(link)) @@ -3256,6 +3696,8 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) if (!IS_ERR(sdev)) { dev->sdev = sdev; scsi_device_put(sdev); + } else { + dev->sdev = NULL; } } } @@ -3264,9 +3706,9 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) * failure occurred, scan would have failed silently. Check * whether all devices are attached. */ - ata_port_for_each_link(link, ap) { - ata_link_for_each_dev(dev, link) { - if (ata_dev_enabled(dev) && !dev->sdev) + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { + if (!dev->sdev) goto exit_loop; } } @@ -3293,12 +3735,11 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) goto repeat; } - ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan " - "failed without making any progress,\n" - " switching to async\n"); + ata_port_err(ap, + "WARNING: synchronous SCSI scan failed without making any progress, switching to async\n"); } - queue_delayed_work(ata_aux_wq, &ap->hotplug_task, + queue_delayed_work(system_long_wq, &ap->hotplug_task, round_jiffies_relative(HZ)); } @@ -3377,8 +3818,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev) mutex_unlock(&ap->scsi_host->scan_mutex); if (sdev) { - ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n", - sdev->sdev_gendev.bus_id); + ata_dev_info(dev, "detaching (SCSI %s)\n", + dev_name(&sdev->sdev_gendev)); scsi_remove_device(sdev); scsi_device_put(sdev); @@ -3390,7 +3831,7 @@ static void ata_scsi_handle_link_detach(struct ata_link *link) struct ata_port *ap = link->ap; struct ata_device *dev; - ata_link_for_each_dev(dev, link) { + ata_for_each_dev(dev, link, ALL) { unsigned long flags; if (!(dev->flags & ATA_DFLAG_DETACHED)) @@ -3400,6 +3841,9 @@ static void ata_scsi_handle_link_detach(struct ata_link *link) dev->flags &= ~ATA_DFLAG_DETACHED; spin_unlock_irqrestore(ap->lock, flags); + if (zpodd_dev_enabled(dev)) + zpodd_exit(dev); + ata_scsi_remove_dev(dev); } } @@ -3444,7 +3888,29 @@ void ata_scsi_hotplug(struct work_struct *work) return; } + /* + * XXX - UGLY HACK + * + * The block layer suspend/resume path is fundamentally broken due + * to freezable kthreads and workqueue and may deadlock if a block + * device gets removed while resume is in progress. I don't know + * what the solution is short of removing freezable kthreads and + * workqueues altogether. + * + * The following is an ugly hack to avoid kicking off device + * removal while freezer is active. This is a joke but does avoid + * this particular deadlock scenario. + * + * https://bugzilla.kernel.org/show_bug.cgi?id=62801 + * http://marc.info/?l=linux-kernel&m=138695698516487 + */ +#ifdef CONFIG_FREEZER + while (pm_freezing) + msleep(10); +#endif + DPRINTK("ENTER\n"); + mutex_lock(&ap->scsi_scan_mutex); /* Unplug detached devices. 
We cannot use link iterator here * because PMP links have to be scanned even if PMP is @@ -3458,6 +3924,7 @@ void ata_scsi_hotplug(struct work_struct *work) /* scan for new ones */ ata_scsi_scan_host(ap, 0); + mutex_unlock(&ap->scsi_scan_mutex); DPRINTK("EXIT\n"); } @@ -3477,8 +3944,8 @@ void ata_scsi_hotplug(struct work_struct *work) * RETURNS: * Zero. */ -static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, unsigned int lun) +int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, + unsigned int id, unsigned int lun) { struct ata_port *ap = ata_shost_to_port(shost); unsigned long flags; @@ -3490,7 +3957,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, if (lun != SCAN_WILD_CARD && lun) return -EINVAL; - if (ap->nr_pmp_links == 0) { + if (!sata_pmp_attached(ap)) { if (channel != SCAN_WILD_CARD && channel) return -EINVAL; devno = id; @@ -3505,10 +3972,10 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, if (devno == SCAN_WILD_CARD) { struct ata_link *link; - ata_port_for_each_link(link, ap) { + ata_for_each_link(link, ap, EDGE) { struct ata_eh_info *ehi = &link->eh_info; - ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1; - ehi->action |= ATA_EH_SOFTRESET; + ehi->probe_mask |= ATA_ALL_DEVICES; + ehi->action |= ATA_EH_RESET; } } else { struct ata_device *dev = ata_find_dev(ap, devno); @@ -3516,8 +3983,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, if (dev) { struct ata_eh_info *ehi = &dev->link->eh_info; ehi->probe_mask |= 1 << dev->devno; - ehi->action |= ATA_EH_SOFTRESET; - ehi->flags |= ATA_EHI_RESUME_LINK; + ehi->action |= ATA_EH_RESET; } else rc = -EINVAL; } @@ -3537,9 +4003,7 @@ static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, * @work: Pointer to ATA port to perform scsi_rescan_device() * * After ATA pass thru (SAT) commands are executed successfully, - * libata need to propagate the changes to SCSI layer. This - * function must be executed from ata_aux_wq such that sdev - * attach/detach don't race with rescan. + * libata need to propagate the changes to SCSI layer. * * LOCKING: * Kernel thread context (may sleep). 
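
ata_scsi_user_scan() above maps the (channel, id) pair written to the sysfs scan attribute onto an ATA device number: without a port multiplier the id selects the device and the channel must be 0 or the wildcard, while with a PMP attached the roles flip. A minimal model of that addressing; SCAN_WILD_CARD's value and the error convention here are stand-ins for the kernel definitions:

#include <stdio.h>

#define SCAN_WILD_CARD	(~0u)	/* illustrative wildcard value */

/* Reduced user-scan addressing: returns 0 and sets *devno on success,
 * -1 (standing in for -EINVAL) when the unused coordinate is non-zero. */
static int user_scan_devno(int pmp_attached, unsigned int channel,
			   unsigned int id, unsigned int *devno)
{
	if (!pmp_attached) {
		if (channel != SCAN_WILD_CARD && channel)
			return -1;
		*devno = id;
	} else {
		if (id != SCAN_WILD_CARD && id)
			return -1;
		*devno = channel;
	}
	return 0;
}

int main(void)
{
	unsigned int devno;

	if (!user_scan_devno(1, 3, 0, &devno))
		printf("PMP link %u selected\n", devno);
	return 0;
}

A wildcard devno then sets probe_mask to ATA_ALL_DEVICES for every link, as the hunk above shows; a specific devno only marks that one device and schedules ATA_EH_RESET.
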
@@ -3552,13 +4016,14 @@ void ata_scsi_dev_rescan(struct work_struct *work) struct ata_device *dev; unsigned long flags; + mutex_lock(&ap->scsi_scan_mutex); spin_lock_irqsave(ap->lock, flags); - ata_port_for_each_link(link, ap) { - ata_link_for_each_dev(dev, link) { + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { struct scsi_device *sdev = dev->sdev; - if (!ata_dev_enabled(dev) || !sdev) + if (!sdev) continue; if (scsi_device_get(sdev)) continue; @@ -3571,6 +4036,7 @@ void ata_scsi_dev_rescan(struct work_struct *work) } spin_unlock_irqrestore(ap->lock, flags); + mutex_unlock(&ap->scsi_scan_mutex); } /** @@ -3597,7 +4063,7 @@ struct ata_port *ata_sas_port_alloc(struct ata_host *host, return NULL; ap->port_no = 0; - ap->lock = shost->host_lock; + ap->lock = &host->lock; ap->pio_mask = port_info->pio_mask; ap->mwdma_mask = port_info->mwdma_mask; ap->udma_mask = port_info->udma_mask; @@ -3623,6 +4089,12 @@ EXPORT_SYMBOL_GPL(ata_sas_port_alloc); */ int ata_sas_port_start(struct ata_port *ap) { + /* + * the port is marked as frozen at allocation time, but if we don't + * have new eh, we won't thaw it + */ + if (!ap->ops->error_handler) + ap->pflags &= ~ATA_PFLAG_FROZEN; return 0; } EXPORT_SYMBOL_GPL(ata_sas_port_start); @@ -3643,6 +4115,26 @@ void ata_sas_port_stop(struct ata_port *ap) EXPORT_SYMBOL_GPL(ata_sas_port_stop); /** + * ata_sas_async_probe - simply schedule probing and return + * @ap: Port to probe + * + * For batch scheduling of probe for sas attached ata devices, assumes + * the port has already been through ata_sas_port_init() + */ +void ata_sas_async_probe(struct ata_port *ap) +{ + __ata_port_probe(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_async_probe); + +int ata_sas_sync_probe(struct ata_port *ap) +{ + return ata_port_probe(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_sync_probe); + + +/** * ata_sas_port_init - Initialize a SATA device * @ap: SATA port to initialize * @@ -3657,12 +4149,10 @@ int ata_sas_port_init(struct ata_port *ap) { int rc = ap->ops->port_start(ap); - if (!rc) { - ap->print_id = ata_print_id++; - rc = ata_bus_probe(ap); - } - - return rc; + if (rc) + return rc; + ap->print_id = atomic_inc_return(&ata_print_id); + return 0; } EXPORT_SYMBOL_GPL(ata_sas_port_init); @@ -3700,7 +4190,6 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure); /** * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device * @cmd: SCSI command to be sent - * @done: Completion function, called when command is complete * @ap: ATA port to which the command is being sent * * RETURNS: @@ -3708,18 +4197,17 @@ EXPORT_SYMBOL_GPL(ata_sas_slave_configure); * 0 otherwise. */ -int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), - struct ata_port *ap) +int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) { int rc = 0; ata_scsi_dump_cdb(ap, cmd); - if (likely(ata_scsi_dev_enabled(ap->link.device))) - rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); + if (likely(ata_dev_enabled(ap->link.device))) + rc = __ata_scsi_queuecmd(cmd, ap->link.device); else { cmd->result = (DID_BAD_TARGET << 16); - done(cmd); + cmd->scsi_done(cmd); } return rc; } |
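
ata_sas_port_init() above also switches print_id allocation from a plain post-increment on a global to atomic_inc_return(), so SAS-attached ports initialized concurrently cannot draw the same id. A userspace analogue with C11 atomics; the starting value is illustrative and not taken from the kernel:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint print_id = 1;	/* illustrative initial value */

/* atomic_fetch_add returns the previous value, so adding 1 mirrors the
 * "increment and return the new value" semantics of atomic_inc_return(). */
static unsigned int alloc_print_id(void)
{
	return atomic_fetch_add(&print_id, 1) + 1;
}

int main(void)
{
	unsigned int a = alloc_print_id();
	unsigned int b = alloc_print_id();

	printf("first id: %u, next id: %u\n", a, b);
	return 0;
}
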
