Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c	604
1 file changed, 321 insertions(+), 283 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index b2c95dbe9d6..3f50dfcb322 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -75,28 +75,6 @@ struct kmem_cache *scsi_sdb_cache; */ #define SCSI_QUEUE_DELAY 3 -/* - * Function: scsi_unprep_request() - * - * Purpose: Remove all preparation done for a request, including its - * associated scsi_cmnd, so that it can be requeued. - * - * Arguments: req - request to unprepare - * - * Lock status: Assumed that no locks are held upon entry. - * - * Returns: Nothing. - */ -static void scsi_unprep_request(struct request *req) -{ - struct scsi_cmnd *cmd = req->special; - - blk_unprep_request(req); - req->special = NULL; - - scsi_put_command(cmd); -} - /** * __scsi_queue_insert - private queue insertion * @cmd: The SCSI command being requeued @@ -109,7 +87,7 @@ static void scsi_unprep_request(struct request *req) * for a requeue after completion, which should only occur in this * file. */ -static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) +static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) { struct Scsi_Host *host = cmd->device->host; struct scsi_device *device = cmd->device; @@ -155,15 +133,15 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) /* * Requeue this command. It will go before all other commands - * that are already in the queue. + * that are already in the queue. Schedule requeue work under + * lock such that the kblockd_schedule_work() call happens + * before blk_cleanup_queue() finishes. */ + cmd->result = 0; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); + kblockd_schedule_work(&device->requeue_work); spin_unlock_irqrestore(q->queue_lock, flags); - - kblockd_schedule_work(q, &device->requeue_work); - - return 0; } /* @@ -185,9 +163,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) * Notes: This could be called either from an interrupt context or a * normal process context. 
*/ -int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) +void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) { - return __scsi_queue_insert(cmd, reason, 1); + __scsi_queue_insert(cmd, reason, 1); } /** * scsi_execute - insert request and wait for the result @@ -207,7 +185,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) */ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, - unsigned char *sense, int timeout, int retries, int flags, + unsigned char *sense, int timeout, int retries, u64 flags, int *resid) { struct request *req; @@ -217,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); if (!req) return ret; + blk_rq_set_block_pc(req); if (bufflen && blk_rq_map_kern(sdev->request_queue, req, buffer, bufflen, __GFP_WAIT)) @@ -228,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, req->sense_len = 0; req->retries = retries; req->timeout = timeout; - req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT; /* @@ -255,11 +233,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, } EXPORT_SYMBOL(scsi_execute); - -int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, +int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid) + int *resid, u64 flags) { char *sense = NULL; int result; @@ -270,14 +247,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, return DRIVER_ERROR << 24; } result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, 0, resid); + sense, timeout, retries, flags, resid); if (sshdr) scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); kfree(sense); return result; } -EXPORT_SYMBOL(scsi_execute_req); +EXPORT_SYMBOL(scsi_execute_req_flags); /* * Function: scsi_init_cmd_errh() @@ -387,37 +364,18 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost) return 0; } -/* - * Function: scsi_run_queue() - * - * Purpose: Select a proper request queue to serve next - * - * Arguments: q - last request's queue - * - * Returns: Nothing - * - * Notes: The previous command was completely finished, start - * a new one if possible. - */ -static void scsi_run_queue(struct request_queue *q) +static void scsi_starved_list_run(struct Scsi_Host *shost) { - struct scsi_device *sdev = q->queuedata; - struct Scsi_Host *shost; LIST_HEAD(starved_list); + struct scsi_device *sdev; unsigned long flags; - /* if the device is dead, sdev will be NULL, so no queue to run */ - if (!sdev) - return; - - shost = sdev->host; - if (scsi_target(sdev)->single_lun) - scsi_single_lun_run(sdev); - spin_lock_irqsave(shost->host_lock, flags); list_splice_init(&shost->starved_list, &starved_list); while (!list_empty(&starved_list)) { + struct request_queue *slq; + /* * As long as shost is accepting commands and we have * starved queues, call blk_run_queue. 
scsi_request_fn @@ -440,15 +398,51 @@ static void scsi_run_queue(struct request_queue *q) continue; } - spin_unlock(shost->host_lock); - spin_lock(sdev->request_queue->queue_lock); - __blk_run_queue(sdev->request_queue); - spin_unlock(sdev->request_queue->queue_lock); - spin_lock(shost->host_lock); + /* + * Once we drop the host lock, a racing scsi_remove_device() + * call may remove the sdev from the starved list and destroy + * it and the queue. Mitigate by taking a reference to the + * queue and never touching the sdev again after we drop the + * host lock. Note: if __scsi_remove_device() invokes + * blk_cleanup_queue() before the queue is run from this + * function then blk_run_queue() will return immediately since + * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. + */ + slq = sdev->request_queue; + if (!blk_get_queue(slq)) + continue; + spin_unlock_irqrestore(shost->host_lock, flags); + + blk_run_queue(slq); + blk_put_queue(slq); + + spin_lock_irqsave(shost->host_lock, flags); } /* put any unprocessed entries back */ list_splice(&starved_list, &shost->starved_list); spin_unlock_irqrestore(shost->host_lock, flags); +} + +/* + * Function: scsi_run_queue() + * + * Purpose: Select a proper request queue to serve next + * + * Arguments: q - last request's queue + * + * Returns: Nothing + * + * Notes: The previous command was completely finished, start + * a new one if possible. + */ +static void scsi_run_queue(struct request_queue *q) +{ + struct scsi_device *sdev = q->queuedata; + + if (scsi_target(sdev)->single_lun) + scsi_single_lun_run(sdev); + if (!list_empty(&sdev->host->starved_list)) + scsi_starved_list_run(sdev->host); blk_run_queue(q); } @@ -483,15 +477,20 @@ void scsi_requeue_run_queue(struct work_struct *work) */ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd) { + struct scsi_device *sdev = cmd->device; struct request *req = cmd->request; unsigned long flags; spin_lock_irqsave(q->queue_lock, flags); - scsi_unprep_request(req); + blk_unprep_request(req); + req->special = NULL; + scsi_put_command(cmd); blk_requeue_request(q, req); spin_unlock_irqrestore(q->queue_lock, flags); scsi_run_queue(q); + + put_device(&sdev->sdev_gendev); } void scsi_next_command(struct scsi_cmnd *cmd) @@ -499,13 +498,9 @@ void scsi_next_command(struct scsi_cmnd *cmd) struct scsi_device *sdev = cmd->device; struct request_queue *q = sdev->request_queue; - /* need to hold a reference on the device before we let go of the cmd */ - get_device(&sdev->sdev_gendev); - scsi_put_command(cmd); scsi_run_queue(q); - /* ok to remove device now */ put_device(&sdev->sdev_gendev); } @@ -517,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost) scsi_run_queue(sdev->request_queue); } -static void __scsi_release_buffers(struct scsi_cmnd *, int); - -/* - * Function: scsi_end_request() - * - * Purpose: Post-processing of completed commands (usually invoked at end - * of upper level post-processing and scsi_io_completion). - * - * Arguments: cmd - command that is complete. - * error - 0 if I/O indicates success, < 0 for I/O error. - * bytes - number of bytes of completed I/O - * requeue - indicates whether we should requeue leftovers. - * - * Lock status: Assumed that lock is not held upon entry. - * - * Returns: cmd if requeue required, NULL otherwise. - * - * Notes: This is called for block device requests in order to - * mark some number of sectors as complete. - * - * We are guaranteeing that the request queue will be goosed - * at some point during this call. 
- * Notes: If cmd was requeued, upon return it will be a stale pointer. - */ -static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, - int bytes, int requeue) -{ - struct request_queue *q = cmd->device->request_queue; - struct request *req = cmd->request; - - /* - * If there are blocks left over at the end, set up the command - * to queue the remainder of them. - */ - if (blk_end_request(req, error, bytes)) { - /* kill remainder if no retrys */ - if (error && scsi_noretry_cmd(cmd)) - blk_end_request_all(req, error); - else { - if (requeue) { - /* - * Bleah. Leftovers again. Stick the - * leftovers in the front of the - * queue, and goose the queue again. - */ - scsi_release_buffers(cmd); - scsi_requeue_command(q, cmd); - cmd = NULL; - } - return cmd; - } - } - - /* - * This will goose the queue request function at the end, so we don't - * need to worry about launching another command. - */ - __scsi_release_buffers(cmd, 0); - scsi_next_command(cmd); - return NULL; -} - static inline unsigned int scsi_sgtable_index(unsigned short nents) { unsigned int index; @@ -630,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb) __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free); } -static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) -{ - - if (cmd->sdb.table.nents) - scsi_free_sgtable(&cmd->sdb); - - memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - - if (do_bidi_check && scsi_bidi_cmnd(cmd)) { - struct scsi_data_buffer *bidi_sdb = - cmd->request->next_rq->special; - scsi_free_sgtable(bidi_sdb); - kmem_cache_free(scsi_sdb_cache, bidi_sdb); - cmd->request->next_rq->special = NULL; - } - - if (scsi_prot_sg_count(cmd)) - scsi_free_sgtable(cmd->prot_sdb); -} - /* * Function: scsi_release_buffers() * - * Purpose: Completion processing for block device I/O requests. + * Purpose: Free resources allocate for a scsi_command. * * Arguments: cmd - command that we are bailing. * @@ -664,15 +577,43 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) * Notes: In the event that an upper level driver rejects a * command, we must release resources allocated during * the __init_io() function. Primarily this would involve - * the scatter-gather table, and potentially any bounce - * buffers. + * the scatter-gather table. */ void scsi_release_buffers(struct scsi_cmnd *cmd) { - __scsi_release_buffers(cmd, 1); + if (cmd->sdb.table.nents) + scsi_free_sgtable(&cmd->sdb); + + memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + + if (scsi_prot_sg_count(cmd)) + scsi_free_sgtable(cmd->prot_sdb); } EXPORT_SYMBOL(scsi_release_buffers); +static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) +{ + struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; + + scsi_free_sgtable(bidi_sdb); + kmem_cache_free(scsi_sdb_cache, bidi_sdb); + cmd->request->next_rq->special = NULL; +} + +/** + * __scsi_error_from_host_byte - translate SCSI error code into errno + * @cmd: SCSI command (unused) + * @result: scsi error code + * + * Translate SCSI error code into standard UNIX errno. 
+ * Return values: + * -ENOLINK temporary transport failure + * -EREMOTEIO permanent target failure, do not retry + * -EBADE permanent nexus failure, retry on other path + * -ENOSPC No write space available + * -ENODATA Medium error + * -EIO unspecified I/O error + */ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) { int error = 0; @@ -682,13 +623,21 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) error = -ENOLINK; break; case DID_TARGET_FAILURE: - cmd->result |= (DID_OK << 16); + set_host_byte(cmd, DID_OK); error = -EREMOTEIO; break; case DID_NEXUS_FAILURE: - cmd->result |= (DID_OK << 16); + set_host_byte(cmd, DID_OK); error = -EBADE; break; + case DID_ALLOC_FAILURE: + set_host_byte(cmd, DID_OK); + error = -ENOSPC; + break; + case DID_MEDIUM_ERROR: + set_host_byte(cmd, DID_OK); + error = -ENODATA; + break; default: error = -EIO; break; @@ -708,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) * * Returns: Nothing * - * Notes: This function is matched in terms of capabilities to - * the function that created the scatter-gather list. - * In other words, if there are no bounce buffers - * (the normal case for most drivers), we don't need - * the logic to deal with cleaning up afterwards. - * - * We must call scsi_end_request(). This will finish off - * the specified number of sectors. If we are done, the - * command block will be released and the queue function - * will be goosed. If we are not done then we have to + * Notes: We will finish off the specified number of sectors. If we + * are done, the command block will be released and the queue + * function will be goosed. If we are not done then we have to * figure out what to do next: * * a) We can call scsi_requeue_command(). The request @@ -726,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) * be used if we made forward progress, or if we want * to switch from READ(10) to READ(6) for example. * - * b) We can call scsi_queue_insert(). The request will + * b) We can call __scsi_queue_insert(). The request will * be put back on the queue and retried using the same * command as before, possibly after a delay. 
* @@ -745,6 +687,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, ACTION_DELAYED_RETRY} action; char *description = NULL; + unsigned long wait_for = (cmd->allowed + 1) * req->timeout; if (result) { sense_valid = scsi_command_normalize_sense(cmd, &sshdr); @@ -753,7 +696,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ - req->errors = result; if (result) { if (sense_valid && req->sense) { /* @@ -769,6 +711,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (!sense_deferred) error = __scsi_error_from_host_byte(cmd, result); } + /* + * __scsi_error_from_host_byte may have reset the host_byte + */ + req->errors = cmd->result; req->resid_len = scsi_get_resid(cmd); @@ -780,11 +726,21 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) req->next_rq->resid_len = scsi_in(cmd)->resid; scsi_release_buffers(cmd); + scsi_release_bidi_buffers(cmd); + blk_end_request_all(req, 0); scsi_next_command(cmd); return; } + } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { + /* + * Certain non BLOCK_PC requests are commands that don't + * actually transfer anything (FLUSH), so cannot use + * good_bytes != blk_rq_bytes(req) as the signal for an error. + * This sets the error explicitly for the problem case. + */ + error = __scsi_error_from_host_byte(cmd, result); } /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ @@ -819,12 +775,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } /* - * A number of bytes were successfully read. If there - * are leftovers and there is some kind of error - * (result != 0), retry the rest. + * If we finished all bytes in the request we are done now. */ - if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) - return; + if (!blk_end_request(req, error, good_bytes)) + goto next_command; + + /* + * Kill remainder if no retrys. + */ + if (error && scsi_noretry_cmd(cmd)) { + blk_end_request_all(req, error); + goto next_command; + } + + /* + * If there had been no error, but we have leftover bytes in the + * requeues just queue the command up again. 
+ */ + if (result == 0) + goto requeue; error = __scsi_error_from_host_byte(cmd, result); @@ -874,12 +843,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) action = ACTION_FAIL; error = -EILSEQ; /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ - } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) && - (cmd->cmnd[0] == UNMAP || - cmd->cmnd[0] == WRITE_SAME_16 || - cmd->cmnd[0] == WRITE_SAME)) { - description = "Discard failure"; + } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { + switch (cmd->cmnd[0]) { + case UNMAP: + description = "Discard failure"; + break; + case WRITE_SAME: + case WRITE_SAME_16: + if (cmd->cmnd[1] & 0x8) + description = "Discard failure"; + else + description = + "Write same failure"; + break; + default: + description = "Invalid command failure"; + break; + } action = ACTION_FAIL; + error = -EREMOTEIO; } else action = ACTION_FAIL; break; @@ -930,10 +912,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) action = ACTION_FAIL; } + if (action != ACTION_FAIL && + time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { + action = ACTION_FAIL; + description = "Command timed out"; + } + switch (action) { case ACTION_FAIL: /* Give up and fail the remainder of the request */ - scsi_release_buffers(cmd); if (!(req->cmd_flags & REQ_QUIET)) { if (description) scmd_printk(KERN_INFO, cmd, "%s\n", @@ -943,12 +930,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) scsi_print_sense("", cmd); scsi_print_command(cmd); } - if (blk_end_request_err(req, error)) - scsi_requeue_command(q, cmd); - else - scsi_next_command(cmd); - break; + if (!blk_end_request_err(req, error)) + goto next_command; + /*FALLTHRU*/ case ACTION_REPREP: + requeue: /* Unprep the request and put it back at the head of the queue. * A new command will be prepared and issued. */ @@ -964,6 +950,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0); break; } + return; + +next_command: + scsi_release_buffers(cmd); + scsi_next_command(cmd); } static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, @@ -979,8 +970,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, return BLKPREP_DEFER; } - req->buffer = NULL; - /* * Next, walk the list, and fill in the addresses and sizes of * each segment. 
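A note on the retry-time cap added above in scsi_io_completion(): the total budget is `(cmd->allowed + 1) * req->timeout`, compared against `jiffies` with `time_before()`, which stays correct across counter wrap. A minimal user-space sketch of that pattern (the tick values and helper names here are illustrative, not the kernel's):

```c
#include <stdbool.h>
#include <stdio.h>

/* Wrap-safe "a is before b" comparison, modelled on the kernel's
 * time_before(): cast the difference to a signed type so a wrapped
 * counter still compares correctly as long as the two values are
 * less than half the counter range apart. */
static bool ticks_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

/* Fail a command once its total retry budget has elapsed, mirroring
 * the scsi_io_completion() check: budget = (allowed + 1) * timeout. */
static bool retry_budget_expired(unsigned long issued_at,
				 unsigned long per_try_timeout,
				 unsigned int allowed_retries,
				 unsigned long now)
{
	unsigned long budget = (allowed_retries + 1) * per_try_timeout;

	return ticks_before(issued_at + budget, now);
}

int main(void)
{
	/* Illustrative numbers only: 5 retries of 30 "ticks" each. */
	unsigned long issued_at = 1000;

	printf("expired at t=1100? %d\n",
	       retry_budget_expired(issued_at, 30, 5, 1100)); /* 0: within budget */
	printf("expired at t=1200? %d\n",
	       retry_budget_expired(issued_at, 30, 5, 1200)); /* 1: budget of 180 passed */
	return 0;
}
```

The signed-difference trick holds only while the two timestamps are within half the counter range of each other, which is the same assumption the kernel's time_before() makes.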
@@ -1005,6 +994,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, */ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) { + struct scsi_device *sdev = cmd->device; struct request *rq = cmd->request; int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); @@ -1052,6 +1042,7 @@ err_exit: scsi_release_buffers(cmd); cmd->request->special = NULL; scsi_put_command(cmd); + put_device(&sdev->sdev_gendev); return error; } EXPORT_SYMBOL(scsi_init_io); @@ -1062,9 +1053,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, struct scsi_cmnd *cmd; if (!req->special) { + /* Bail if we can't get a reference to the device */ + if (!get_device(&sdev->sdev_gendev)) + return NULL; + cmd = scsi_get_command(sdev, GFP_ATOMIC); - if (unlikely(!cmd)) + if (unlikely(!cmd)) { + put_device(&sdev->sdev_gendev); return NULL; + } req->special = cmd; } else { cmd = req->special; @@ -1082,15 +1079,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev, int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd; - int ret = scsi_prep_state_check(sdev, req); - - if (ret != BLKPREP_OK) - return ret; - - cmd = scsi_get_cmd_from_req(sdev, req); - if (unlikely(!cmd)) - return BLKPREP_DEFER; + struct scsi_cmnd *cmd = req->special; /* * BLOCK_PC requests may transfer data, in which case they must @@ -1110,7 +1099,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(blk_rq_bytes(req)); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - req->buffer = NULL; } cmd->cmd_len = req->cmd_len; @@ -1134,15 +1122,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd); */ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) { - struct scsi_cmnd *cmd; - int ret = scsi_prep_state_check(sdev, req); - - if (ret != BLKPREP_OK) - return ret; + struct scsi_cmnd *cmd = req->special; if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh && sdev->scsi_dh_data->scsi_dh->prep_fn)) { - ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); + int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); if (ret != BLKPREP_OK) return ret; } @@ -1152,16 +1136,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req) */ BUG_ON(!req->nr_phys_segments); - cmd = scsi_get_cmd_from_req(sdev, req); - if (unlikely(!cmd)) - return BLKPREP_DEFER; - memset(cmd->cmnd, 0, BLK_MAX_CDB); return scsi_init_io(cmd, GFP_ATOMIC); } EXPORT_SYMBOL(scsi_setup_fs_cmnd); -int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) +static int +scsi_prep_state_check(struct scsi_device *sdev, struct request *req) { int ret = BLKPREP_OK; @@ -1172,6 +1153,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { switch (sdev->sdev_state) { case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: /* * If the device is offline we refuse to process any * commands. 
The device must be brought online @@ -1212,9 +1194,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) } return ret; } -EXPORT_SYMBOL(scsi_prep_state_check); -int scsi_prep_return(struct request_queue *q, struct request *req, int ret) +static int +scsi_prep_return(struct request_queue *q, struct request *req, int ret) { struct scsi_device *sdev = q->queuedata; @@ -1226,6 +1208,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) struct scsi_cmnd *cmd = req->special; scsi_release_buffers(cmd); scsi_put_command(cmd); + put_device(&sdev->sdev_gendev); req->special = NULL; } break; @@ -1244,18 +1227,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret) return ret; } -EXPORT_SYMBOL(scsi_prep_return); -int scsi_prep_fn(struct request_queue *q, struct request *req) +static int scsi_prep_fn(struct request_queue *q, struct request *req) { struct scsi_device *sdev = q->queuedata; - int ret = BLKPREP_KILL; + struct scsi_cmnd *cmd; + int ret; - if (req->cmd_type == REQ_TYPE_BLOCK_PC) + ret = scsi_prep_state_check(sdev, req); + if (ret != BLKPREP_OK) + goto out; + + cmd = scsi_get_cmd_from_req(sdev, req); + if (unlikely(!cmd)) { + ret = BLKPREP_DEFER; + goto out; + } + + if (req->cmd_type == REQ_TYPE_FS) + ret = scsi_cmd_to_driver(cmd)->init_command(cmd); + else if (req->cmd_type == REQ_TYPE_BLOCK_PC) ret = scsi_setup_blk_pc_cmnd(sdev, req); + else + ret = BLKPREP_KILL; + +out: return scsi_prep_return(q, req, ret); } -EXPORT_SYMBOL(scsi_prep_fn); + +static void scsi_unprep_fn(struct request_queue *q, struct request *req) +{ + if (req->cmd_type == REQ_TYPE_FS) { + struct scsi_cmnd *cmd = req->special; + struct scsi_driver *drv = scsi_cmd_to_driver(cmd); + + if (drv->uninit_command) + drv->uninit_command(cmd); + } +} /* * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else @@ -1369,24 +1378,27 @@ static inline int scsi_host_queue_ready(struct request_queue *q, * may be changed after request stacking drivers call the function, * regardless of taking lock or not. * - * When scsi can't dispatch I/Os anymore and needs to kill I/Os - * (e.g. !sdev), scsi needs to return 'not busy'. - * Otherwise, request stacking drivers may hold requests forever. + * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi + * needs to return 'not busy'. Otherwise, request stacking drivers + * may hold requests forever. */ static int scsi_lld_busy(struct request_queue *q) { struct scsi_device *sdev = q->queuedata; struct Scsi_Host *shost; - struct scsi_target *starget; - if (!sdev) + if (blk_queue_dying(q)) return 0; shost = sdev->host; - starget = scsi_target(sdev); - if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || - scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) + /* + * Ignore host/starget busy state. + * Since block layer does not have a concept of fairness across + * multiple queues, congestion of host/starget needs to be handled + * in SCSI layer. + */ + if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) return 1; return 0; @@ -1480,22 +1492,14 @@ static void scsi_softirq_done(struct request *rq) * Lock status: IO request lock assumed to be held when called. 
*/ static void scsi_request_fn(struct request_queue *q) + __releases(q->queue_lock) + __acquires(q->queue_lock) { struct scsi_device *sdev = q->queuedata; struct Scsi_Host *shost; struct scsi_cmnd *cmd; struct request *req; - if (!sdev) { - while ((req = blk_peek_request(q)) != NULL) - scsi_kill_request(req, q); - return; - } - - if(!get_device(&sdev->sdev_gendev)) - /* We must be tearing the block queue down already */ - return; - /* * To start with, we keep looping until the queue is empty, or until * the host is no longer able to accept any more requests. @@ -1584,7 +1588,7 @@ static void scsi_request_fn(struct request_queue *q) goto out_delay; } - goto out; + return; not_ready: spin_unlock_irq(shost->host_lock); @@ -1603,12 +1607,6 @@ static void scsi_request_fn(struct request_queue *q) out_delay: if (sdev->device_busy == 0) blk_delay_queue(q, SCSI_QUEUE_DELAY); -out: - /* must be careful here...if we trigger the ->remove() function - * we cannot be holding the q lock */ - spin_unlock_irq(q->queue_lock); - put_device(&sdev->sdev_gendev); - spin_lock_irq(q->queue_lock); } u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) @@ -1627,7 +1625,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) host_dev = scsi_get_device(shost); if (host_dev && host_dev->dma_mask) - bounce_limit = *host_dev->dma_mask; + bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT; return bounce_limit; } @@ -1637,7 +1635,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, request_fn_proc *request_fn) { struct request_queue *q; - struct device *dev = shost->shost_gendev.parent; + struct device *dev = shost->dma_dev; q = blk_init_queue(request_fn, NULL); if (!q) @@ -1687,26 +1685,13 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) return NULL; blk_queue_prep_rq(q, scsi_prep_fn); + blk_queue_unprep_rq(q, scsi_unprep_fn); blk_queue_softirq_done(q, scsi_softirq_done); blk_queue_rq_timed_out(q, scsi_times_out); blk_queue_lld_busy(q, scsi_lld_busy); return q; } -void scsi_free_queue(struct request_queue *q) -{ - unsigned long flags; - - WARN_ON(q->queuedata); - - /* cause scsi_request_fn() to kill all non-finished requests */ - spin_lock_irqsave(q->queue_lock, flags); - q->request_fn(q); - spin_unlock_irqrestore(q->queue_lock, flags); - - blk_cleanup_queue(q); -} - /* * Function: scsi_block_requests() * @@ -2077,6 +2062,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) switch (oldstate) { case SDEV_CREATED: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: case SDEV_QUIESCE: case SDEV_BLOCK: break; @@ -2089,6 +2075,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) switch (oldstate) { case SDEV_RUNNING: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: break; default: goto illegal; @@ -2096,6 +2083,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) break; case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: switch (oldstate) { case SDEV_CREATED: case SDEV_RUNNING: @@ -2132,6 +2120,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) case SDEV_RUNNING: case SDEV_QUIESCE: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: case SDEV_BLOCK: break; default: @@ -2144,7 +2133,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) case SDEV_CREATED: case SDEV_RUNNING: case SDEV_OFFLINE: + case SDEV_TRANSPORT_OFFLINE: case SDEV_CANCEL: + case SDEV_CREATED_BLOCK: break; default: goto illegal; @@ -2182,7 +2173,21 @@ static void 
scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) case SDEV_EVT_MEDIA_CHANGE: envp[idx++] = "SDEV_MEDIA_CHANGE=1"; break; - + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; + break; + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; + break; + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; + break; + case SDEV_EVT_LUN_CHANGE_REPORTED: + envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; + break; default: /* do nothing */ break; @@ -2203,10 +2208,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) void scsi_evt_thread(struct work_struct *work) { struct scsi_device *sdev; + enum scsi_device_event evt_type; LIST_HEAD(event_list); sdev = container_of(work, struct scsi_device, event_work); + for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) + if (test_and_clear_bit(evt_type, sdev->pending_events)) + sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); + while (1) { struct scsi_event *evt; struct list_head *this, *tmp; @@ -2276,6 +2286,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, /* evt_type-specific initialization, if any */ switch (evt_type) { case SDEV_EVT_MEDIA_CHANGE: + case SDEV_EVT_INQUIRY_CHANGE_REPORTED: + case SDEV_EVT_CAPACITY_CHANGE_REPORTED: + case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: + case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: + case SDEV_EVT_LUN_CHANGE_REPORTED: default: /* do nothing */ break; @@ -2347,10 +2362,14 @@ EXPORT_SYMBOL(scsi_device_quiesce); * * Must be called with user context, may sleep. */ -void -scsi_device_resume(struct scsi_device *sdev) +void scsi_device_resume(struct scsi_device *sdev) { - if(scsi_device_set_state(sdev, SDEV_RUNNING)) + /* check if the device state was mutated prior to resume, and if + * so assume the state is being managed elsewhere (for example + * device deleted during suspend) + */ + if (sdev->sdev_state != SDEV_QUIESCE || + scsi_device_set_state(sdev, SDEV_RUNNING)) return; scsi_run_queue(sdev->request_queue); } @@ -2397,7 +2416,6 @@ EXPORT_SYMBOL(scsi_target_resume); * (which must be a legal transition). When the device is in this * state, all commands are deferred until the scsi lld reenables * the device with scsi_device_unblock or device_block_tmo fires. - * This routine assumes the host_lock is held on entry. */ int scsi_internal_device_block(struct scsi_device *sdev) @@ -2430,6 +2448,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block); /** * scsi_internal_device_unblock - resume a device after a block request * @sdev: device to resume + * @new_state: state to set devices to after unblocking * * Called by scsi lld's or the midlayer to restart the device queue * for the previously suspended scsi device. Called from interrupt or @@ -2439,25 +2458,30 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block); * * Notes: * This routine transitions the device to the SDEV_RUNNING state - * (which must be a legal transition) allowing the midlayer to - * goose the queue for this device. This routine assumes the - * host_lock is held upon entry. + * or to one of the offline states (which must be a legal transition) + * allowing the midlayer to goose the queue for this device. 
*/ int -scsi_internal_device_unblock(struct scsi_device *sdev) +scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state) { struct request_queue *q = sdev->request_queue; unsigned long flags; - - /* - * Try to transition the scsi device to SDEV_RUNNING - * and goose the device queue if successful. + + /* + * Try to transition the scsi device to SDEV_RUNNING or one of the + * offlined states and goose the device queue if successful. */ - if (sdev->sdev_state == SDEV_BLOCK) - sdev->sdev_state = SDEV_RUNNING; - else if (sdev->sdev_state == SDEV_CREATED_BLOCK) - sdev->sdev_state = SDEV_CREATED; - else if (sdev->sdev_state != SDEV_CANCEL && + if ((sdev->sdev_state == SDEV_BLOCK) || + (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) + sdev->sdev_state = new_state; + else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { + if (new_state == SDEV_TRANSPORT_OFFLINE || + new_state == SDEV_OFFLINE) + sdev->sdev_state = new_state; + else + sdev->sdev_state = SDEV_CREATED; + } else if (sdev->sdev_state != SDEV_CANCEL && sdev->sdev_state != SDEV_OFFLINE) return -EINVAL; @@ -2498,26 +2522,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block); static void device_unblock(struct scsi_device *sdev, void *data) { - scsi_internal_device_unblock(sdev); + scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data); } static int target_unblock(struct device *dev, void *data) { if (scsi_is_target_device(dev)) - starget_for_each_device(to_scsi_target(dev), NULL, + starget_for_each_device(to_scsi_target(dev), data, device_unblock); return 0; } void -scsi_target_unblock(struct device *dev) +scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) { if (scsi_is_target_device(dev)) - starget_for_each_device(to_scsi_target(dev), NULL, + starget_for_each_device(to_scsi_target(dev), &new_state, device_unblock); else - device_for_each_child(dev, NULL, target_unblock); + device_for_each_child(dev, &new_state, target_unblock); } EXPORT_SYMBOL_GPL(scsi_target_unblock); @@ -2567,7 +2591,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, if (*len > sg_len) *len = sg_len; - return kmap_atomic(page, KM_BIO_SRC_IRQ); + return kmap_atomic(page); } EXPORT_SYMBOL(scsi_kmap_atomic_sg); @@ -2577,6 +2601,20 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg); */ void scsi_kunmap_atomic_sg(void *virt) { - kunmap_atomic(virt, KM_BIO_SRC_IRQ); + kunmap_atomic(virt); } EXPORT_SYMBOL(scsi_kunmap_atomic_sg); + +void sdev_disable_disk_events(struct scsi_device *sdev) +{ + atomic_inc(&sdev->disk_events_disable_depth); +} +EXPORT_SYMBOL(sdev_disable_disk_events); + +void sdev_enable_disk_events(struct scsi_device *sdev) +{ + if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) + return; + atomic_dec(&sdev->disk_events_disable_depth); +} +EXPORT_SYMBOL(sdev_enable_disk_events); |
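The final hunk introduces sdev_disable_disk_events()/sdev_enable_disk_events(), a nestable disable count with an underflow warning. A rough user-space model of that counting discipline, using C11 atomics in place of the kernel's atomic_t (the events_blocked() helper is invented for the example):

```c
#include <stdatomic.h>
#include <stdio.h>

/* Nesting disable counter: callers may nest disables, events are only
 * delivered while the depth is zero, and an unbalanced enable is
 * reported instead of driving the counter negative. */
static atomic_int disable_depth;

static void disk_events_disable(void)
{
	atomic_fetch_add(&disable_depth, 1);
}

static void disk_events_enable(void)
{
	if (atomic_load(&disable_depth) <= 0) {
		fprintf(stderr, "unbalanced enable ignored\n");
		return;
	}
	atomic_fetch_sub(&disable_depth, 1);
}

static int events_blocked(void)
{
	return atomic_load(&disable_depth) > 0;
}

int main(void)
{
	disk_events_disable();
	disk_events_disable();                      /* nested disable */
	printf("blocked: %d\n", events_blocked());  /* 1 */
	disk_events_enable();
	printf("blocked: %d\n", events_blocked());  /* still 1 */
	disk_events_enable();
	printf("blocked: %d\n", events_blocked());  /* 0 */
	disk_events_enable();                       /* unbalanced: warns, stays at 0 */
	return 0;
}
```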

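For reference, the __scsi_error_from_host_byte() changes above map host bytes to distinct errno values so that upper layers such as multipath can tell temporary transport failures from permanent target failures. A standalone sketch of that mapping; the DID_* values below are placeholder constants for illustration, the real ones come from include/scsi/scsi.h:

```c
#include <errno.h>
#include <stdio.h>

/* Placeholder host-byte codes for illustration only; the kernel's
 * DID_* constants are defined in include/scsi/scsi.h. */
enum host_byte {
	DID_TRANSPORT_FAILFAST,
	DID_TARGET_FAILURE,
	DID_NEXUS_FAILURE,
	DID_ALLOC_FAILURE,
	DID_MEDIUM_ERROR,
	DID_ERROR,
};

/* Mirrors the mapping documented in the patch: each condition gets a
 * distinct errno so callers can choose between retrying, failing over
 * to another path, or giving up. */
static int errno_from_host_byte(enum host_byte hb)
{
	switch (hb) {
	case DID_TRANSPORT_FAILFAST:	return -ENOLINK;   /* temporary transport failure */
	case DID_TARGET_FAILURE:	return -EREMOTEIO; /* permanent target failure */
	case DID_NEXUS_FAILURE:		return -EBADE;     /* nexus failure, retry on other path */
	case DID_ALLOC_FAILURE:		return -ENOSPC;    /* no write space available */
	case DID_MEDIUM_ERROR:		return -ENODATA;   /* medium error */
	default:			return -EIO;       /* unspecified I/O error */
	}
}

int main(void)
{
	printf("DID_NEXUS_FAILURE -> %d\n", errno_from_host_byte(DID_NEXUS_FAILURE));
	printf("DID_ERROR         -> %d\n", errno_from_host_byte(DID_ERROR));
	return 0;
}
```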