Diffstat (limited to 'drivers/scsi/scsi_lib.c')
 drivers/scsi/scsi_lib.c | 1450 ++++++++++++++++++++++++++----------------------
 1 file changed, 786 insertions(+), 664 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f40898dc2d1..3f50dfcb322 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -12,6 +12,7 @@
 #include <linux/blkdev.h>
 #include <linux/completion.h>
 #include <linux/kernel.h>
+#include <linux/export.h>
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -65,55 +66,32 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
 };
 #undef SP
 
-static struct kmem_cache *scsi_bidi_sdb_cache;
-
-static void scsi_run_queue(struct request_queue *q);
+struct kmem_cache *scsi_sdb_cache;
 
 /*
- * Function:	scsi_unprep_request()
- *
- * Purpose:	Remove all preparation done for a request, including its
- *		associated scsi_cmnd, so that it can be requeued.
- *
- * Arguments:	req	- request to unprepare
- *
- * Lock status:	Assumed that no locks are held upon entry.
- *
- * Returns:	Nothing.
+ * When to reinvoke queueing after a resource shortage. It's 3 msecs so as
+ * not to change behaviour from the previous unplug mechanism; experimentation
+ * may prove this needs changing.
 */
-static void scsi_unprep_request(struct request *req)
-{
-	struct scsi_cmnd *cmd = req->special;
-
-	req->cmd_flags &= ~REQ_DONTPREP;
-	req->special = NULL;
-
-	scsi_put_command(cmd);
-}
+#define SCSI_QUEUE_DELAY	3
 
-/*
- * Function:    scsi_queue_insert()
- *
- * Purpose:     Insert a command in the midlevel queue.
- *
- * Arguments:   cmd    - command that we are adding to queue.
- *              reason - why we are inserting command to queue.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     Nothing.
- *
- * Notes:       We do this for one of two cases.  Either the host is busy
- *              and it cannot accept any more commands for the time being,
- *              or the device returned QUEUE_FULL and can accept no more
- *              commands.
- * Notes:       This could be called either from an interrupt context or a
- *              normal process context.
+/**
+ * __scsi_queue_insert - private queue insertion
+ * @cmd: The SCSI command being requeued
+ * @reason: The reason for the requeue
+ * @unbusy: Whether the queue should be unbusied
+ *
+ * This is a private queue insertion.  The public interface
+ * scsi_queue_insert() always assumes the queue should be unbusied
+ * because it's always called before the completion.  This function is
+ * for a requeue after completion, which should only occur in this
+ * file.
 */
-int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct scsi_device *device = cmd->device;
+	struct scsi_target *starget = scsi_target(device);
 	struct request_queue *q = device->request_queue;
 	unsigned long flags;
 
@@ -133,37 +111,62 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
 	 * if a command is requeued with no other commands outstanding
 	 * either for the device or for the host.
 	 */
-	if (reason == SCSI_MLQUEUE_HOST_BUSY)
+	switch (reason) {
+	case SCSI_MLQUEUE_HOST_BUSY:
 		host->host_blocked = host->max_host_blocked;
-	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
+		break;
+	case SCSI_MLQUEUE_DEVICE_BUSY:
+	case SCSI_MLQUEUE_EH_RETRY:
 		device->device_blocked = device->max_device_blocked;
+		break;
+	case SCSI_MLQUEUE_TARGET_BUSY:
+		starget->target_blocked = starget->max_target_blocked;
+		break;
+	}
 
 	/*
 	 * Decrement the counters, since these commands are no longer
 	 * active on the host/device.
 	 */
-	scsi_device_unbusy(device);
+	if (unbusy)
+		scsi_device_unbusy(device);
 
 	/*
	 * Requeue this command.
	 * It will go before all other commands
-	 * that are already in the queue.
-	 *
-	 * NOTE: there is magic here about the way the queue is plugged if
-	 * we have no outstanding commands.
-	 *
-	 * Although we *don't* plug the queue, we call the request
-	 * function.  The SCSI request function detects the blocked condition
-	 * and plugs the queue appropriately.
-	 */
+	 * that are already in the queue. Schedule requeue work under
+	 * lock such that the kblockd_schedule_work() call happens
+	 * before blk_cleanup_queue() finishes.
+	 */
+	cmd->result = 0;
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_requeue_request(q, cmd->request);
+	kblockd_schedule_work(&device->requeue_work);
 	spin_unlock_irqrestore(q->queue_lock, flags);
-
-	scsi_run_queue(q);
-
-	return 0;
 }
 
+/*
+ * Function:    scsi_queue_insert()
+ *
+ * Purpose:     Insert a command in the midlevel queue.
+ *
+ * Arguments:   cmd    - command that we are adding to queue.
+ *              reason - why we are inserting command to queue.
+ *
+ * Lock status: Assumed that lock is not held upon entry.
+ *
+ * Returns:     Nothing.
+ *
+ * Notes:       We do this for one of two cases.  Either the host is busy
+ *              and it cannot accept any more commands for the time being,
+ *              or the device returned QUEUE_FULL and can accept no more
+ *              commands.
+ * Notes:       This could be called either from an interrupt context or a
+ *              normal process context.
+ */
+void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+{
+	__scsi_queue_insert(cmd, reason, 1);
+}
 /**
  * scsi_execute - insert request and wait for the result
  * @sdev:	scsi device
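Illustration (not part of the patch): the blocked counters set in __scsi_queue_insert() are driven by the SCSI_MLQUEUE_* codes an LLD returns from its queuecommand hook. A minimal sketch, with all demo_* names hypothetical:

	/*
	 * Hedged sketch of an LLD ->queuecommand() reporting a transient
	 * shortage.  Returning SCSI_MLQUEUE_DEVICE_BUSY makes the midlayer
	 * call scsi_queue_insert(), which sets device_blocked and requeues
	 * the command at the head of the queue.
	 */
	static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
	{
		struct demo_hba *hba = shost_priv(shost);	/* hypothetical */

		if (demo_hw_queue_full(hba))		/* hypothetical helper */
			return SCSI_MLQUEUE_DEVICE_BUSY; /* midlayer requeues */

		demo_hw_submit(hba, cmd);		/* hypothetical submit */
		return 0;
	}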
@@ -175,19 +178,24 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  * @timeout:	request timeout in seconds
  * @retries:	number of times to retry request
  * @flags:	or into request flags;
+ * @resid:	optional residual length
  *
  * returns the req->errors value which is the scsi_cmnd result
  * field.
 */
 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 		 int data_direction, void *buffer, unsigned bufflen,
-		 unsigned char *sense, int timeout, int retries, int flags)
+		 unsigned char *sense, int timeout, int retries, u64 flags,
+		 int *resid)
 {
 	struct request *req;
 	int write = (data_direction == DMA_TO_DEVICE);
 	int ret = DRIVER_ERROR << 24;
 
 	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+	if (!req)
+		return ret;
+	blk_rq_set_block_pc(req);
 
 	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
 				       buffer, bufflen, __GFP_WAIT))
@@ -199,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	req->sense_len = 0;
 	req->retries = retries;
 	req->timeout = timeout;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
 	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
 
 	/*
@@ -207,6 +214,17 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	 */
 	blk_execute_rq(req->q, NULL, req, 1);
 
+	/*
+	 * Some devices (USB mass-storage in particular) may transfer
+	 * garbage data together with a residue indicating that the data
+	 * is invalid.  Prevent the garbage from being misinterpreted
+	 * and prevent security leaks by zeroing out the excess data.
+	 */
+	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
+		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);
+
+	if (resid)
+		*resid = req->resid_len;
 	ret = req->errors;
  out:
 	blk_put_request(req);
@@ -215,10 +233,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 }
 EXPORT_SYMBOL(scsi_execute);
 
-
-int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
+int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
 		     int data_direction, void *buffer, unsigned bufflen,
-		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
+		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
+		     int *resid, u64 flags)
 {
 	char *sense = NULL;
 	int result;
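Illustration (not part of the patch): a caller can now pass a resid pointer and rely on the zeroing above, so only bytes the device actually returned are trusted. A hedged sketch with illustrative CDB and timeout values:

	unsigned char cdb[6] = { INQUIRY, 0, 0, 0, 96, 0 };
	unsigned char buf[96];
	int resid = 0;

	/* Issue INQUIRY; on return, the tail of buf has been zeroed by
	 * scsi_execute(), so only (96 - resid) bytes are meaningful. */
	scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buf, sizeof(buf),
		     NULL, 10 * HZ, 3, 0, &resid);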
@@ -229,204 +247,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
 		return DRIVER_ERROR << 24;
 	}
 	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
-			      sense, timeout, retries, 0);
+			      sense, timeout, retries, flags, resid);
 	if (sshdr)
 		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);
 
 	kfree(sense);
 	return result;
 }
-EXPORT_SYMBOL(scsi_execute_req);
-
-struct scsi_io_context {
-	void *data;
-	void (*done)(void *data, char *sense, int result, int resid);
-	char sense[SCSI_SENSE_BUFFERSIZE];
-};
-
-static struct kmem_cache *scsi_io_context_cache;
-
-static void scsi_end_async(struct request *req, int uptodate)
-{
-	struct scsi_io_context *sioc = req->end_io_data;
-
-	if (sioc->done)
-		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);
-
-	kmem_cache_free(scsi_io_context_cache, sioc);
-	__blk_put_request(req->q, req);
-}
-
-static int scsi_merge_bio(struct request *rq, struct bio *bio)
-{
-	struct request_queue *q = rq->q;
-
-	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-	if (rq_data_dir(rq) == WRITE)
-		bio->bi_rw |= (1 << BIO_RW);
-	blk_queue_bounce(q, &bio);
-
-	return blk_rq_append_bio(q, rq, bio);
-}
-
-static void scsi_bi_endio(struct bio *bio, int error)
-{
-	bio_put(bio);
-}
-
-/**
- * scsi_req_map_sg - map a scatterlist into a request
- * @rq:		request to fill
- * @sgl:	scatterlist
- * @nsegs:	number of elements
- * @bufflen:	len of buffer
- * @gfp:	memory allocation flags
- *
- * scsi_req_map_sg maps a scatterlist into a request so that the
- * request can be sent to the block layer. We do not trust the scatterlist
- * sent to use, as some ULDs use that struct to only organize the pages.
- */
-static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
-			   int nsegs, unsigned bufflen, gfp_t gfp)
-{
-	struct request_queue *q = rq->q;
-	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	unsigned int data_len = bufflen, len, bytes, off;
-	struct scatterlist *sg;
-	struct page *page;
-	struct bio *bio = NULL;
-	int i, err, nr_vecs = 0;
-
-	for_each_sg(sgl, sg, nsegs, i) {
-		page = sg_page(sg);
-		off = sg->offset;
-		len = sg->length;
-
-		while (len > 0 && data_len > 0) {
-			/*
-			 * sg sends a scatterlist that is larger than
-			 * the data_len it wants transferred for certain
-			 * IO sizes
-			 */
-			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
-			bytes = min(bytes, data_len);
-
-			if (!bio) {
-				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
-				nr_pages -= nr_vecs;
-
-				bio = bio_alloc(gfp, nr_vecs);
-				if (!bio) {
-					err = -ENOMEM;
-					goto free_bios;
-				}
-				bio->bi_end_io = scsi_bi_endio;
-			}
-
-			if (bio_add_pc_page(q, bio, page, bytes, off) !=
-			    bytes) {
-				bio_put(bio);
-				err = -EINVAL;
-				goto free_bios;
-			}
-
-			if (bio->bi_vcnt >= nr_vecs) {
-				err = scsi_merge_bio(rq, bio);
-				if (err) {
-					bio_endio(bio, 0);
-					goto free_bios;
-				}
-				bio = NULL;
-			}
-
-			page++;
-			len -= bytes;
-			data_len -=bytes;
-			off = 0;
-		}
-	}
-
-	rq->buffer = rq->data = NULL;
-	rq->data_len = bufflen;
-	return 0;
-
-free_bios:
-	while ((bio = rq->bio) != NULL) {
-		rq->bio = bio->bi_next;
-		/*
-		 * call endio instead of bio_put incase it was bounced
-		 */
-		bio_endio(bio, 0);
-	}
-
-	return err;
-}
-
-/**
- * scsi_execute_async - insert request
- * @sdev:	scsi device
- * @cmd:	scsi command
- * @cmd_len:	length of scsi cdb
- * @data_direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_NONE
- * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
- * @bufflen:	len of buffer
- * @use_sg:	if buffer is a scatterlist this is the number of elements
- * @timeout:	request timeout in seconds
- * @retries:	number of times to retry request
- * @privdata:	data passed to done()
- * @done:	callback function when done
- * @gfp:	memory allocation flags
- */
-int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
-		       int cmd_len, int data_direction, void *buffer,
-		       unsigned bufflen, int use_sg, int timeout, int retries,
-		       void *privdata, void (*done)(void *, char *, int, int),
-		       gfp_t gfp)
-{
-	struct request *req;
-	struct scsi_io_context *sioc;
-	int err = 0;
-	int write = (data_direction == DMA_TO_DEVICE);
-
-	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
-	if (!sioc)
-		return DRIVER_ERROR << 24;
-
-	req = blk_get_request(sdev->request_queue, write, gfp);
-	if (!req)
-		goto free_sense;
-	req->cmd_type = REQ_TYPE_BLOCK_PC;
-	req->cmd_flags |= REQ_QUIET;
-
-	if (use_sg)
-		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
-	else if (bufflen)
-		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);
-
-	if (err)
-		goto free_req;
-
-	req->cmd_len = cmd_len;
-	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-	memcpy(req->cmd, cmd, req->cmd_len);
-	req->sense = sioc->sense;
-	req->sense_len = 0;
-	req->timeout = timeout;
-	req->retries = retries;
-	req->end_io_data = sioc;
-
-	sioc->data = privdata;
-	sioc->done = done;
-
-	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
-	return 0;
-
-free_req:
-	blk_put_request(req);
-free_sense:
-	kmem_cache_free(scsi_io_context_cache, sioc);
-	return DRIVER_ERROR << 24;
-}
-EXPORT_SYMBOL_GPL(scsi_execute_async);
+EXPORT_SYMBOL(scsi_execute_req_flags);
 
 /*
  * Function:    scsi_init_cmd_errh()
 *
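Illustration (not part of the patch): with scsi_execute_async() removed, synchronous callers decode failures through the sense header. A hedged sketch of the surviving pattern:

	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* Issue TEST UNIT READY and inspect the decoded sense data */
	result = scsi_execute_req(sdev, cdb, DMA_NONE, NULL, 0, &sshdr,
				  30 * HZ, 3, NULL);
	if (result && scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == UNIT_ATTENTION) {
		/* e.g. media changed; the caller decides whether to retry */
	}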
@@ -445,16 +273,18 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
 	scsi_set_resid(cmd, 0);
 	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
 	if (cmd->cmd_len == 0)
-		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
+		cmd->cmd_len = scsi_command_size(cmd->cmnd);
 }
 
 void scsi_device_unbusy(struct scsi_device *sdev)
 {
 	struct Scsi_Host *shost = sdev->host;
+	struct scsi_target *starget = scsi_target(sdev);
 	unsigned long flags;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	shost->host_busy--;
+	starget->target_busy--;
 	if (unlikely(scsi_host_in_recovery(shost) &&
 		     (shost->host_failed || shost->host_eh_scheduled)))
 		scsi_eh_wakeup(shost);
@@ -510,32 +340,42 @@ static void scsi_single_lun_run(struct scsi_device *current_sdev)
 	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
-/*
- * Function:	scsi_run_queue()
- *
- * Purpose:	Select a proper request queue to serve next
- *
- * Arguments:	q	- last request's queue
- *
- * Returns:	Nothing
- *
- * Notes:	The previous command was completely finished, start
- *		a new one if possible.
- */
-static void scsi_run_queue(struct request_queue *q)
+static inline int scsi_device_is_busy(struct scsi_device *sdev)
 {
-	struct scsi_device *sdev = q->queuedata;
-	struct Scsi_Host *shost = sdev->host;
-	unsigned long flags;
+	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
+		return 1;
 
-	if (scsi_target(sdev)->single_lun)
-		scsi_single_lun_run(sdev);
+	return 0;
+}
+
+static inline int scsi_target_is_busy(struct scsi_target *starget)
+{
+	return ((starget->can_queue > 0 &&
+		 starget->target_busy >= starget->can_queue) ||
+		 starget->target_blocked);
+}
+
+static inline int scsi_host_is_busy(struct Scsi_Host *shost)
+{
+	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
+	    shost->host_blocked || shost->host_self_blocked)
+		return 1;
+
+	return 0;
+}
+
+static void scsi_starved_list_run(struct Scsi_Host *shost)
+{
+	LIST_HEAD(starved_list);
+	struct scsi_device *sdev;
+	unsigned long flags;
 
 	spin_lock_irqsave(shost->host_lock, flags);
-	while (!list_empty(&shost->starved_list) &&
-	       !shost->host_blocked && !shost->host_self_blocked &&
-		!((shost->can_queue > 0) &&
-		  (shost->host_busy >= shost->can_queue))) {
+	list_splice_init(&shost->starved_list, &starved_list);
+
+	while (!list_empty(&starved_list)) {
+		struct request_queue *slq;
+
 		/*
 		 * As long as shost is accepting commands and we have
 		 * starved queues, call blk_run_queue. scsi_request_fn
@@ -546,35 +386,77 @@ static void scsi_run_queue(struct request_queue *q)
 		 * scsi_request_fn must get the host_lock before checking
 		 * or modifying starved_list or starved_entry.
 		 */
-		sdev = list_entry(shost->starved_list.next,
-				  struct scsi_device, starved_entry);
+		if (scsi_host_is_busy(shost))
+			break;
+
+		sdev = list_entry(starved_list.next,
+				  struct scsi_device, starved_entry);
 		list_del_init(&sdev->starved_entry);
-		spin_unlock_irqrestore(shost->host_lock, flags);
+		if (scsi_target_is_busy(scsi_target(sdev))) {
+			list_move_tail(&sdev->starved_entry,
+				       &shost->starved_list);
+			continue;
+		}
 
+		/*
+		 * Once we drop the host lock, a racing scsi_remove_device()
+		 * call may remove the sdev from the starved list and destroy
+		 * it and the queue.  Mitigate by taking a reference to the
+		 * queue and never touching the sdev again after we drop the
+		 * host lock.  Note: if __scsi_remove_device() invokes
+		 * blk_cleanup_queue() before the queue is run from this
+		 * function then blk_run_queue() will return immediately since
+		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
+		 */
+		slq = sdev->request_queue;
+		if (!blk_get_queue(slq))
+			continue;
+
+		spin_unlock_irqrestore(shost->host_lock, flags);
 
-		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-		    !test_and_set_bit(QUEUE_FLAG_REENTER,
-				      &sdev->request_queue->queue_flags)) {
-			blk_run_queue(sdev->request_queue);
-			clear_bit(QUEUE_FLAG_REENTER,
-				  &sdev->request_queue->queue_flags);
-		} else
-			blk_run_queue(sdev->request_queue);
+		blk_run_queue(slq);
+		blk_put_queue(slq);
 
 		spin_lock_irqsave(shost->host_lock, flags);
-		if (unlikely(!list_empty(&sdev->starved_entry)))
-			/*
-			 * sdev lost a race, and was put back on the
-			 * starved list. This is unlikely but without this
-			 * in theory we could loop forever.
-			 */
-			break;
 	}
+	/* put any unprocessed entries back */
+	list_splice(&starved_list, &shost->starved_list);
 	spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Function:	scsi_run_queue()
+ *
+ * Purpose:	Select a proper request queue to serve next
+ *
+ * Arguments:	q	- last request's queue
+ *
+ * Returns:	Nothing
+ *
+ * Notes:	The previous command was completely finished, start
+ *		a new one if possible.
+ */
+static void scsi_run_queue(struct request_queue *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+
+	if (scsi_target(sdev)->single_lun)
+		scsi_single_lun_run(sdev);
+	if (!list_empty(&sdev->host->starved_list))
+		scsi_starved_list_run(sdev->host);
 
 	blk_run_queue(q);
 }
 
+void scsi_requeue_run_queue(struct work_struct *work)
+{
+	struct scsi_device *sdev;
+	struct request_queue *q;
+
+	sdev = container_of(work, struct scsi_device, requeue_work);
+	q = sdev->request_queue;
+	scsi_run_queue(q);
+}
+
 /*
  * Function:	scsi_requeue_command()
  *
@@ -595,15 +477,20 @@ static void scsi_run_queue(struct request_queue *q)
 */
 static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
 {
+	struct scsi_device *sdev = cmd->device;
 	struct request *req = cmd->request;
 	unsigned long flags;
 
-	scsi_unprep_request(req);
 	spin_lock_irqsave(q->queue_lock, flags);
+	blk_unprep_request(req);
+	req->special = NULL;
+	scsi_put_command(cmd);
 	blk_requeue_request(q, req);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	scsi_run_queue(q);
+
+	put_device(&sdev->sdev_gendev);
 }
 
 void scsi_next_command(struct scsi_cmnd *cmd)
@@ -611,13 +498,9 @@ void scsi_next_command(struct scsi_cmnd *cmd)
 	struct scsi_device *sdev = cmd->device;
 	struct request_queue *q = sdev->request_queue;
 
-	/* need to hold a reference on the device before we let go of the cmd */
-	get_device(&sdev->sdev_gendev);
-
 	scsi_put_command(cmd);
 	scsi_run_queue(q);
 
-	/* ok to remove device now */
 	put_device(&sdev->sdev_gendev);
 }
 
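Illustration (not part of the patch): the blk_get_queue()/blk_put_queue() pairing in scsi_starved_list_run() is the general pattern for keeping a queue alive across a lock drop. A stripped-down sketch with generic names:

	struct request_queue *q;

	spin_lock_irqsave(shost->host_lock, flags);
	q = sdev->request_queue;
	if (blk_get_queue(q)) {			/* pin the queue */
		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(q);		/* safe without the lock */
		blk_put_queue(q);		/* drop the pin */
		spin_lock_irqsave(shost->host_lock, flags);
	}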
@@ -629,69 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
 		scsi_run_queue(sdev->request_queue);
 }
 
-/*
- * Function:    scsi_end_request()
- *
- * Purpose:     Post-processing of completed commands (usually invoked at end
- *		of upper level post-processing and scsi_io_completion).
- *
- * Arguments:   cmd	 - command that is complete.
- *              error    - 0 if I/O indicates success, < 0 for I/O error.
- *              bytes    - number of bytes of completed I/O
- *		requeue  - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     cmd if requeue required, NULL otherwise.
- *
- * Notes:       This is called for block device requests in order to
- *              mark some number of sectors as complete.
- *
- *		We are guaranteeing that the request queue will be goosed
- *		at some point during this call.
- * Notes:	If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
-					  int bytes, int requeue)
-{
-	struct request_queue *q = cmd->device->request_queue;
-	struct request *req = cmd->request;
-
-	/*
-	 * If there are blocks left over at the end, set up the command
-	 * to queue the remainder of them.
-	 */
-	if (blk_end_request(req, error, bytes)) {
-		int leftover = (req->hard_nr_sectors << 9);
-
-		if (blk_pc_request(req))
-			leftover = req->data_len;
-
-		/* kill remainder if no retrys */
-		if (error && blk_noretry_request(req))
-			blk_end_request(req, error, leftover);
-		else {
-			if (requeue) {
-				/*
-				 * Bleah.  Leftovers again.  Stick the
-				 * leftovers in the front of the
-				 * queue, and goose the queue again.
-				 */
-				scsi_requeue_command(q, cmd);
-				cmd = NULL;
-			}
-			return cmd;
-		}
-	}
-
-	/*
-	 * This will goose the queue request function at the end, so we don't
-	 * need to worry about launching another command.
-	 */
-	scsi_next_command(cmd);
-	return NULL;
-}
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
 	unsigned int index;
@@ -746,7 +566,7 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
 /*
  * Function:    scsi_release_buffers()
  *
- * Purpose:     Completion processing for block device I/O requests.
+ * Purpose:     Free resources allocated for a scsi_command.
  *
  * Arguments:   cmd	- command that we are bailing.
 *
@@ -757,8 +577,7 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
  * Notes:       In the event that an upper level driver rejects a
  *              command, we must release resources allocated during
  *              the __init_io() function.  Primarily this would involve
- *              the scatter-gather table, and potentially any bounce
- *              buffers.
+ *              the scatter-gather table.
 */
 void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
@@ -767,42 +586,64 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
 
 	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
 
-	if (scsi_bidi_cmnd(cmd)) {
-		struct scsi_data_buffer *bidi_sdb =
-			cmd->request->next_rq->special;
-		scsi_free_sgtable(bidi_sdb);
-		kmem_cache_free(scsi_bidi_sdb_cache, bidi_sdb);
-		cmd->request->next_rq->special = NULL;
-	}
+	if (scsi_prot_sg_count(cmd))
+		scsi_free_sgtable(cmd->prot_sdb);
 }
 EXPORT_SYMBOL(scsi_release_buffers);
 
-/*
- * Bidi commands Must be complete as a whole, both sides at once.
- * If part of the bytes were written and lld returned
- * scsi_in()->resid and/or scsi_out()->resid this information will be left
- * in req->data_len and req->next_rq->data_len. The upper-layer driver can
- * decide what to do with this information.
- */
-void scsi_end_bidi_request(struct scsi_cmnd *cmd)
+static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
 {
-	struct request *req = cmd->request;
-	unsigned int dlen = req->data_len;
-	unsigned int next_dlen = req->next_rq->data_len;
+	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
 
-	req->data_len = scsi_out(cmd)->resid;
-	req->next_rq->data_len = scsi_in(cmd)->resid;
+	scsi_free_sgtable(bidi_sdb);
+	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+	cmd->request->next_rq->special = NULL;
+}
 
-	/* The req and req->next_rq have not been completed */
-	BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen));
+/**
+ * __scsi_error_from_host_byte - translate SCSI error code into errno
+ * @cmd:	SCSI command (unused)
+ * @result:	scsi error code
+ *
+ * Translate SCSI error code into standard UNIX errno.
+ * Return values:
+ * -ENOLINK	temporary transport failure
+ * -EREMOTEIO	permanent target failure, do not retry
+ * -EBADE	permanent nexus failure, retry on other path
+ * -ENOSPC	No write space available
+ * -ENODATA	Medium error
+ * -EIO		unspecified I/O error
+ */
+static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
+{
+	int error = 0;
 
-	scsi_release_buffers(cmd);
+	switch(host_byte(result)) {
+	case DID_TRANSPORT_FAILFAST:
+		error = -ENOLINK;
+		break;
+	case DID_TARGET_FAILURE:
+		set_host_byte(cmd, DID_OK);
+		error = -EREMOTEIO;
+		break;
+	case DID_NEXUS_FAILURE:
+		set_host_byte(cmd, DID_OK);
+		error = -EBADE;
+		break;
+	case DID_ALLOC_FAILURE:
+		set_host_byte(cmd, DID_OK);
+		error = -ENOSPC;
+		break;
+	case DID_MEDIUM_ERROR:
+		set_host_byte(cmd, DID_OK);
+		error = -ENODATA;
+		break;
+	default:
+		error = -EIO;
+		break;
+	}
 
-	/*
-	 * This will goose the queue request function at the end, so we don't
-	 * need to worry about launching another command.
-	 */
-	scsi_next_command(cmd);
+	return error;
 }
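Illustration (not part of the patch): the host byte lives in bits 16-23 of cmd->result, which is what host_byte() extracts before the mapping above. A hedged sketch of the producing side in an LLD:

	/* Report a transport fast-fail; scsi_io_completion() will map
	 * this command to -ENOLINK via the switch above. */
	cmd->result = DID_TRANSPORT_FAILFAST << 16;
	cmd->scsi_done(cmd);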
 
 /*
@@ -816,33 +657,37 @@ void scsi_end_bidi_request(struct scsi_cmnd *cmd)
 *
 * Returns:     Nothing
 *
- * Notes:       This function is matched in terms of capabilities to
- *              the function that created the scatter-gather list.
- *              In other words, if there are no bounce buffers
- *              (the normal case for most drivers), we don't need
- *              the logic to deal with cleaning up afterwards.
+ * Notes:       We will finish off the specified number of sectors.  If we
+ *		are done, the command block will be released and the queue
+ *		function will be goosed.  If we are not done then we have to
+ *		figure out what to do next:
 *
- *		We must do one of several things here:
+ *		a) We can call scsi_requeue_command().  The request
+ *		   will be unprepared and put back on the queue.  Then
+ *		   a new command will be created for it.  This should
+ *		   be used if we made forward progress, or if we want
+ *		   to switch from READ(10) to READ(6) for example.
 *
- *		a) Call scsi_end_request.  This will finish off the
- *		   specified number of sectors.  If we are done, the
- *		   command block will be released, and the queue
- *		   function will be goosed.  If we are not done, then
- *		   scsi_end_request will directly goose the queue.
+ *		b) We can call __scsi_queue_insert().  The request will
+ *		   be put back on the queue and retried using the same
+ *		   command as before, possibly after a delay.
 *
- *		b) We can just use scsi_requeue_command() here.  This would
- *		   be used if we just wanted to retry, for example.
+ *		c) We can call blk_end_request() with -EIO to fail
+ *		   the remainder of the request.
 */
 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 {
 	int result = cmd->result;
-	int this_count = scsi_bufflen(cmd);
 	struct request_queue *q = cmd->device->request_queue;
 	struct request *req = cmd->request;
-	int clear_errors = 1;
+	int error = 0;
 	struct scsi_sense_hdr sshdr;
 	int sense_valid = 0;
 	int sense_deferred = 0;
+	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
+	      ACTION_DELAYED_RETRY} action;
+	char *description = NULL;
+	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
 
 	if (result) {
 		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
 		if (sense_valid)
 			sense_deferred = scsi_sense_is_deferred(&sshdr);
 	}
 
-	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
-		req->errors = result;
+	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
 		if (result) {
-			clear_errors = 0;
 			if (sense_valid && req->sense) {
 				/*
 				 * SG_IO wants current and deferred errors
@@ -865,40 +708,102 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				memcpy(req->sense, cmd->sense_buffer, len);
 				req->sense_len = len;
 			}
+			if (!sense_deferred)
+				error = __scsi_error_from_host_byte(cmd, result);
 		}
+		/*
+		 * __scsi_error_from_host_byte may have reset the host_byte
+		 */
+		req->errors = cmd->result;
+
+		req->resid_len = scsi_get_resid(cmd);
+
 		if (scsi_bidi_cmnd(cmd)) {
-			/* will also release_buffers */
-			scsi_end_bidi_request(cmd);
+			/*
+			 * Bidi commands must be completed as a whole,
+			 * both sides at once.
+			 */
+			req->next_rq->resid_len = scsi_in(cmd)->resid;
+
+			scsi_release_buffers(cmd);
+			scsi_release_bidi_buffers(cmd);
+
+			blk_end_request_all(req, 0);
+
+			scsi_next_command(cmd);
 			return;
 		}
-		req->data_len = scsi_get_resid(cmd);
+	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+		/*
+		 * Certain non BLOCK_PC requests are commands that don't
+		 * actually transfer anything (FLUSH), so cannot use
+		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
+		 * This sets the error explicitly for the problem case.
+		 */
+		error = __scsi_error_from_host_byte(cmd, result);
 	}
 
-	BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */
-	scsi_release_buffers(cmd);
+	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
+	BUG_ON(blk_bidi_rq(req));
 
 	/*
 	 * Next deal with any sectors which we were able to correctly
 	 * handle.
 	 */
-	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
 				      "%d bytes done.\n",
-				      req->nr_sectors, good_bytes));
+				      blk_rq_sectors(req), good_bytes));
 
+	/*
+	 * Recovered errors need reporting, but they're always treated
+	 * as success, so fiddle the result code here.  For BLOCK_PC
+	 * we already took a copy of the original into rq->errors which
+	 * is what gets returned to the user
+	 */
+	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
+		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
+		 * print since caller wants ATA registers. Only occurs on
+		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
+		 */
+		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
+			;
+		else if (!(req->cmd_flags & REQ_QUIET))
+			scsi_print_sense("", cmd);
+		result = 0;
+		/* BLOCK_PC may have set error */
+		error = 0;
+	}
 
-	if (clear_errors)
-		req->errors = 0;
+	/*
+	 * If we finished all bytes in the request we are done now.
+	 */
+	if (!blk_end_request(req, error, good_bytes))
+		goto next_command;
 
-	/* A number of bytes were successfully read.
-	 * If there are leftovers and there is some kind of error
-	 * (result != 0), retry the rest.
+	/*
+	 * Kill remainder if no retries.
 	 */
-	if (scsi_end_request(cmd, 0, good_bytes, result == 0) == NULL)
-		return;
+	if (error && scsi_noretry_cmd(cmd)) {
+		blk_end_request_all(req, error);
+		goto next_command;
+	}
 
-	/* good_bytes = 0, or (inclusive) there were leftovers and
-	 * result = 0, so scsi_end_request couldn't retry.
+	/*
+	 * If there had been no error, but we have leftover bytes in the
+	 * request, just queue the command up again.
 	 */
-	if (sense_valid && !sense_deferred) {
+	if (result == 0)
+		goto requeue;
+
+	error = __scsi_error_from_host_byte(cmd, result);
+
+	if (host_byte(result) == DID_RESET) {
+		/* Third party bus reset or reset for error recovery
+		 * reasons.  Just retry the command and see what
+		 * happens.
+		 */
+		action = ACTION_RETRY;
+	} else if (sense_valid && !sense_deferred) {
 		switch (sshdr.sense_key) {
 		case UNIT_ATTENTION:
 			if (cmd->device->removable) {
@@ -906,16 +811,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 				 * and quietly refuse further access.
 				 */
 				cmd->device->changed = 1;
-				scsi_end_request(cmd, -EIO, this_count, 1);
-				return;
+				description = "Media Changed";
+				action = ACTION_FAIL;
 			} else {
 				/* Must have been a power glitch, or a
 				 * bus reset.  Could not have been a
 				 * media change, so we just retry the
-				 * request and see what happens.
+				 * command and see what happens.
 				 */
-				scsi_requeue_command(q, cmd);
-				return;
+				action = ACTION_RETRY;
 			}
 			break;
 		case ILLEGAL_REQUEST:
@@ -931,15 +835,41 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
 			    (cmd->cmnd[0] == READ_10 ||
 			     cmd->cmnd[0] == WRITE_10)) {
+				/* This will issue a new 6-byte command. */
 				cmd->device->use_10_for_rw = 0;
-				/* This will cause a retry with a
-				 * 6-byte command.
-				 */
-				scsi_requeue_command(q, cmd);
-				return;
-			} else {
-				scsi_end_request(cmd, -EIO, this_count, 1);
-				return;
+				action = ACTION_REPREP;
+			} else if (sshdr.asc == 0x10) /* DIX */ {
+				description = "Host Data Integrity Failure";
+				action = ACTION_FAIL;
+				error = -EILSEQ;
+			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
+			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+				switch (cmd->cmnd[0]) {
+				case UNMAP:
+					description = "Discard failure";
+					break;
+				case WRITE_SAME:
+				case WRITE_SAME_16:
+					if (cmd->cmnd[1] & 0x8)
+						description = "Discard failure";
+					else
+						description =
+							"Write same failure";
+					break;
+				default:
+					description = "Invalid command failure";
+					break;
+				}
+				action = ACTION_FAIL;
+				error = -EREMOTEIO;
+			} else
+				action = ACTION_FAIL;
+			break;
+		case ABORTED_COMMAND:
+			action = ACTION_FAIL;
+			if (sshdr.asc == 0x10) { /* DIF */
+				description = "Target Data Integrity Failure";
+				error = -EILSEQ;
 			}
 			break;
 		case NOT_READY:
@@ -955,49 +885,76 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			case 0x07: /* operation in progress */
 			case 0x08: /* Long write in progress */
 			case 0x09: /* self test in progress */
-				scsi_requeue_command(q, cmd);
-				return;
+			case 0x14: /* space allocation in progress */
+				action = ACTION_DELAYED_RETRY;
+				break;
 			default:
+				description = "Device not ready";
+				action = ACTION_FAIL;
 				break;
 			}
+			} else {
+				description = "Device not ready";
+				action = ACTION_FAIL;
 			}
-		if (!(req->cmd_flags & REQ_QUIET))
-			scsi_cmd_print_sense_hdr(cmd,
-						 "Device not ready",
-						 &sshdr);
-
-		scsi_end_request(cmd, -EIO, this_count, 1);
-		return;
+			break;
 		case VOLUME_OVERFLOW:
-			if (!(req->cmd_flags & REQ_QUIET)) {
-				scmd_printk(KERN_INFO, cmd,
-					    "Volume overflow, CDB: ");
-				__scsi_print_command(cmd->cmnd);
-				scsi_print_sense("", cmd);
-			}
 			/* See SSC3rXX or current. */
-			scsi_end_request(cmd, -EIO, this_count, 1);
-			return;
+			action = ACTION_FAIL;
+			break;
 		default:
+			description = "Unhandled sense code";
+			action = ACTION_FAIL;
 			break;
 		}
+	} else {
+		description = "Unhandled error code";
+		action = ACTION_FAIL;
 	}
-	if (host_byte(result) == DID_RESET) {
-		/* Third party bus reset or reset for error recovery
-		 * reasons.  Just retry the request and see what
-		 * happens.
-		 */
-		scsi_requeue_command(q, cmd);
-		return;
+
+	if (action != ACTION_FAIL &&
+	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+		action = ACTION_FAIL;
+		description = "Command timed out";
 	}
-	if (result) {
+
+	switch (action) {
+	case ACTION_FAIL:
+		/* Give up and fail the remainder of the request */
 		if (!(req->cmd_flags & REQ_QUIET)) {
+			if (description)
+				scmd_printk(KERN_INFO, cmd, "%s\n",
+					    description);
 			scsi_print_result(cmd);
 			if (driver_byte(result) & DRIVER_SENSE)
 				scsi_print_sense("", cmd);
+			scsi_print_command(cmd);
 		}
+		if (!blk_end_request_err(req, error))
+			goto next_command;
+		/*FALLTHRU*/
+	case ACTION_REPREP:
+	requeue:
+		/* Unprep the request and put it back at the head of the queue.
+		 * A new command will be prepared and issued.
+		 */
+		scsi_release_buffers(cmd);
+		scsi_requeue_command(q, cmd);
+		break;
+	case ACTION_RETRY:
+		/* Retry the same command immediately */
+		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
+		break;
+	case ACTION_DELAYED_RETRY:
+		/* Retry the same command after a delay */
+		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
+		break;
 	}
-	scsi_end_request(cmd, -EIO, this_count, !result);
+	return;
+
+next_command:
+	scsi_release_buffers(cmd);
+	scsi_next_command(cmd);
 }
 
 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
@@ -1013,8 +970,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 		return BLKPREP_DEFER;
 	}
 
-	req->buffer = NULL;
-
 	/*
 	 * Next, walk the list, and fill in the addresses and sizes of
 	 * each segment.
@@ -1022,10 +977,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
 	BUG_ON(count > sdb->table.nents);
 	sdb->table.nents = count;
-	if (blk_pc_request(req))
-		sdb->length = req->data_len;
-	else
-		sdb->length = req->nr_sectors << 9;
+	sdb->length = blk_rq_bytes(req);
 	return BLKPREP_OK;
 }
 
@@ -1042,34 +994,55 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
 */
 int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
 {
-	int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
+	struct scsi_device *sdev = cmd->device;
+	struct request *rq = cmd->request;
+
+	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
 	if (error)
 		goto err_exit;
 
-	if (blk_bidi_rq(cmd->request)) {
+	if (blk_bidi_rq(rq)) {
 		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
-			scsi_bidi_sdb_cache, GFP_ATOMIC);
+			scsi_sdb_cache, GFP_ATOMIC);
 		if (!bidi_sdb) {
 			error = BLKPREP_DEFER;
 			goto err_exit;
 		}
 
-		cmd->request->next_rq->special = bidi_sdb;
-		error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
-					  GFP_ATOMIC);
+		rq->next_rq->special = bidi_sdb;
+		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
 		if (error)
 			goto err_exit;
 	}
 
+	if (blk_integrity_rq(rq)) {
+		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
+		int ivecs, count;
+
+		BUG_ON(prot_sdb == NULL);
+		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
+
+		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
+			error = BLKPREP_DEFER;
+			goto err_exit;
+		}
+
+		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
+						prot_sdb->table.sgl);
+		BUG_ON(unlikely(count > ivecs));
+		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
+
+		cmd->prot_sdb = prot_sdb;
+		cmd->prot_sdb->table.nents = count;
+	}
+
 	return BLKPREP_OK;
 
 err_exit:
 	scsi_release_buffers(cmd);
-	if (error == BLKPREP_KILL)
-		scsi_put_command(cmd);
-	else /* BLKPREP_DEFER */
-		scsi_unprep_request(cmd->request);
-
+	cmd->request->special = NULL;
+	scsi_put_command(cmd);
+	put_device(&sdev->sdev_gendev);
 	return error;
 }
 EXPORT_SYMBOL(scsi_init_io);
@@ -1080,9 +1053,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 	struct scsi_cmnd *cmd;
 
 	if (!req->special) {
+		/* Bail if we can't get a reference to the device */
+		if (!get_device(&sdev->sdev_gendev))
+			return NULL;
+
 		cmd = scsi_get_command(sdev, GFP_ATOMIC);
-		if (unlikely(!cmd))
+		if (unlikely(!cmd)) {
+			put_device(&sdev->sdev_gendev);
 			return NULL;
+		}
 		req->special = cmd;
 	} else {
 		cmd = req->special;
@@ -1092,20 +1071,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 	cmd->tag = req->tag;
 	cmd->request = req;
+	cmd->cmnd = req->cmd;
+	cmd->prot_op = SCSI_PROT_NORMAL;
+
 	return cmd;
 }
 
 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd;
-	int ret = scsi_prep_state_check(sdev, req);
-
-	if (ret != BLKPREP_OK)
-		return ret;
-
-	cmd = scsi_get_cmd_from_req(sdev, req);
-	if (unlikely(!cmd))
-		return BLKPREP_DEFER;
+	struct scsi_cmnd *cmd = req->special;
 
 	/*
 	 * BLOCK_PC requests may transfer data, in which case they must
@@ -1122,26 +1096,21 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 		if (unlikely(ret))
 			return ret;
 	} else {
-		BUG_ON(req->data_len);
-		BUG_ON(req->data);
+		BUG_ON(blk_rq_bytes(req));
 
 		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-		req->buffer = NULL;
 	}
 
-	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
-	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
 	cmd->cmd_len = req->cmd_len;
-	if (!req->data_len)
+	if (!blk_rq_bytes(req))
 		cmd->sc_data_direction = DMA_NONE;
 	else if (rq_data_dir(req) == WRITE)
 		cmd->sc_data_direction = DMA_TO_DEVICE;
 	else
 		cmd->sc_data_direction = DMA_FROM_DEVICE;
 
-	cmd->transfersize = req->data_len;
+	cmd->transfersize = blk_rq_bytes(req);
 	cmd->allowed = req->retries;
-	cmd->timeout_per_command = req->timeout;
 	return BLKPREP_OK;
 }
 EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
@@ -1153,25 +1122,27 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
 */
 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-	struct scsi_cmnd *cmd;
-	int ret = scsi_prep_state_check(sdev, req);
+	struct scsi_cmnd *cmd = req->special;
+
+	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
+		     && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
+		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+		if (ret != BLKPREP_OK)
+			return ret;
+	}
 
-	if (ret != BLKPREP_OK)
-		return ret;
 	/*
 	 * Filesystem requests must transfer data.
 	 */
 	BUG_ON(!req->nr_phys_segments);
 
-	cmd = scsi_get_cmd_from_req(sdev, req);
-	if (unlikely(!cmd))
-		return BLKPREP_DEFER;
-
+	memset(cmd->cmnd, 0, BLK_MAX_CDB);
 	return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
 
-int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+static int
+scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 {
 	int ret = BLKPREP_OK;
 
@@ -1182,6 +1153,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
 		switch (sdev->sdev_state) {
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 			/*
 			 * If the device is offline we refuse to process any
 			 * commands.  The device must be brought online
@@ -1202,6 +1174,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 			break;
 		case SDEV_QUIESCE:
 		case SDEV_BLOCK:
+		case SDEV_CREATED_BLOCK:
 			/*
 			 * If the device is blocked we defer normal commands.
 			 */
@@ -1221,9 +1194,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 	}
 	return ret;
 }
-EXPORT_SYMBOL(scsi_prep_state_check);
 
-int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+static int
+scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 {
 	struct scsi_device *sdev = q->queuedata;
 
@@ -1235,17 +1208,18 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 		struct scsi_cmnd *cmd = req->special;
 		scsi_release_buffers(cmd);
 		scsi_put_command(cmd);
+		put_device(&sdev->sdev_gendev);
 		req->special = NULL;
 	}
 	break;
 	case BLKPREP_DEFER:
 		/*
-		 * If we defer, the elv_next_request() returns NULL, but the
-		 * queue must be restarted, so we plug here if no returning
-		 * command will automatically do that.
+		 * If we defer, the blk_peek_request() returns NULL, but the
+		 * queue must be restarted, so we schedule a callback to happen
+		 * shortly.
 		 */
 		if (sdev->device_busy == 0)
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 		break;
 	default:
 		req->cmd_flags |= REQ_DONTPREP;
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL(scsi_prep_return);
 
-int scsi_prep_fn(struct request_queue *q, struct request *req)
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
 	struct scsi_device *sdev = q->queuedata;
-	int ret = BLKPREP_KILL;
+	struct scsi_cmnd *cmd;
+	int ret;
+
+	ret = scsi_prep_state_check(sdev, req);
+	if (ret != BLKPREP_OK)
+		goto out;
 
-	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+	cmd = scsi_get_cmd_from_req(sdev, req);
+	if (unlikely(!cmd)) {
+		ret = BLKPREP_DEFER;
+		goto out;
+	}
+
+	if (req->cmd_type == REQ_TYPE_FS)
+		ret = scsi_cmd_to_driver(cmd)->init_command(cmd);
+	else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
 		ret = scsi_setup_blk_pc_cmnd(sdev, req);
+	else
+		ret = BLKPREP_KILL;
+
+out:
 	return scsi_prep_return(q, req, ret);
 }
 
+static void scsi_unprep_fn(struct request_queue *q, struct request *req)
+{
+	if (req->cmd_type == REQ_TYPE_FS) {
+		struct scsi_cmnd *cmd = req->special;
+		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+		if (drv->uninit_command)
+			drv->uninit_command(cmd);
+	}
+}
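Illustration (not part of the patch): scsi_prep_fn() now hands REQ_TYPE_FS requests to the owning upper-level driver. A hedged sketch of the contract, with a hypothetical driver:

	/* The ULD supplies the hooks dispatched by scsi_prep_fn() and
	 * scsi_unprep_fn() above; other scsi_driver fields are omitted. */
	static int demo_init_command(struct scsi_cmnd *cmd)
	{
		/* build the CDB for cmd->request, return a BLKPREP_* code */
		return BLKPREP_OK;
	}

	static struct scsi_driver demo_driver = {
		.init_command	= demo_init_command,
		/* .uninit_command is optional */
	};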
 
 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
  * return 0.
@@ -1274,8 +1275,6 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
 static inline int scsi_dev_queue_ready(struct request_queue *q,
 				       struct scsi_device *sdev)
 {
-	if (sdev->device_busy >= sdev->queue_depth)
-		return 0;
 	if (sdev->device_busy == 0 && sdev->device_blocked) {
 		/*
 		 * unblock after device_blocked iterates to zero
 		 */
 		if (--sdev->device_blocked == 0) {
 			SCSI_LOG_MLQUEUE(3,
 				   sdev_printk(KERN_INFO, sdev,
 				   "unblocking device at zero depth\n"));
 		} else {
-			blk_plug_device(q);
+			blk_delay_queue(q, SCSI_QUEUE_DELAY);
 			return 0;
 		}
 	}
-	if (sdev->device_blocked)
+	if (scsi_device_is_busy(sdev))
 		return 0;
+
+	return 1;
+}
+
+
+/*
+ * scsi_target_queue_ready: checks whether we can send commands to the target
+ * @sdev: scsi device on starget to check.
+ *
+ * Called with the host lock held.
+ */
+static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
+					  struct scsi_device *sdev)
+{
+	struct scsi_target *starget = scsi_target(sdev);
+
+	if (starget->single_lun) {
+		if (starget->starget_sdev_user &&
+		    starget->starget_sdev_user != sdev)
+			return 0;
+		starget->starget_sdev_user = sdev;
+	}
+
+	if (starget->target_busy == 0 && starget->target_blocked) {
+		/*
+		 * unblock after target_blocked iterates to zero
+		 */
+		if (--starget->target_blocked == 0) {
+			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
+					 "unblocking target at zero depth\n"));
+		} else
+			return 0;
+	}
+
+	if (scsi_target_is_busy(starget)) {
+		list_move_tail(&sdev->starved_entry, &shost->starved_list);
+		return 0;
+	}
 
 	return 1;
 }
@@ -1317,12 +1354,10 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 			printk("scsi%d unblocking host at zero depth\n",
 			       shost->host_no));
 		} else {
-			blk_plug_device(q);
 			return 0;
 		}
 	}
-	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
-	    shost->host_blocked || shost->host_self_blocked) {
+	if (scsi_host_is_busy(shost)) {
 		if (list_empty(&sdev->starved_entry))
 			list_add_tail(&sdev->starved_entry, &shost->starved_list);
 		return 0;
@@ -1336,22 +1371,56 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
 }
 
 /*
+ * Busy state exporting function for request stacking drivers.
+ *
+ * For efficiency, no lock is taken to check the busy state of
+ * shost/starget/sdev, since the returned value is not guaranteed and
+ * may be changed after request stacking drivers call the function,
+ * regardless of taking lock or not.
+ *
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
+ * needs to return 'not busy'. Otherwise, request stacking drivers
+ * may hold requests forever.
+ */
+static int scsi_lld_busy(struct request_queue *q)
+{
+	struct scsi_device *sdev = q->queuedata;
+	struct Scsi_Host *shost;
+
+	if (blk_queue_dying(q))
+		return 0;
+
+	shost = sdev->host;
+
+	/*
+	 * Ignore host/starget busy state.
+	 * Since block layer does not have a concept of fairness across
+	 * multiple queues, congestion of host/starget needs to be handled
+	 * in SCSI layer.
+	 */
+	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
+		return 1;
+
+	return 0;
+}
+
+/*
  * Kill a request for a dead device
 */
 static void scsi_kill_request(struct request *req, struct request_queue *q)
 {
 	struct scsi_cmnd *cmd = req->special;
-	struct scsi_device *sdev = cmd->device;
-	struct Scsi_Host *shost = sdev->host;
+	struct scsi_device *sdev;
+	struct scsi_target *starget;
+	struct Scsi_Host *shost;
 
-	blkdev_dequeue_request(req);
+	blk_start_request(req);
 
-	if (unlikely(cmd == NULL)) {
-		printk(KERN_CRIT "impossible request in %s.\n",
-		       __FUNCTION__);
-		BUG();
-	}
+	scmd_printk(KERN_INFO, cmd, "killing request\n");
 
+	sdev = cmd->device;
+	starget = scsi_target(sdev);
+	shost = sdev->host;
 	scsi_init_cmd_errh(cmd);
 	cmd->result = DID_NO_CONNECT << 16;
 	atomic_inc(&cmd->device->iorequest_cnt);
@@ -1365,20 +1434,25 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
 	spin_unlock(sdev->request_queue->queue_lock);
 	spin_lock(shost->host_lock);
 	shost->host_busy++;
+	starget->target_busy++;
 	spin_unlock(shost->host_lock);
 	spin_lock(sdev->request_queue->queue_lock);
 
-	__scsi_done(cmd);
+	blk_complete_request(req);
 }
 
 static void scsi_softirq_done(struct request *rq)
 {
-	struct scsi_cmnd *cmd = rq->completion_data;
-	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
+	struct scsi_cmnd *cmd = rq->special;
+	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
 	int disposition;
 
 	INIT_LIST_HEAD(&cmd->eh_entry);
 
+	atomic_inc(&cmd->device->iodone_cnt);
+	if (cmd->result)
+		atomic_inc(&cmd->device->ioerr_cnt);
+
 	disposition = scsi_decide_disposition(cmd);
 	if (disposition != SUCCESS &&
 	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
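Illustration (not part of the patch): scsi_softirq_done() is the consumer half of the completion path; the producer is the LLD's interrupt handler. A hedged sketch with a hypothetical helper:

	/* Record the outcome and hand the command back; the block layer
	 * later runs scsi_softirq_done() on the request in softirq
	 * context. */
	static void demo_complete_cmd(struct scsi_cmnd *cmd, int host_code)
	{
		cmd->result = host_code << 16;	/* e.g. DID_OK, DID_ERROR */
		cmd->scsi_done(cmd);
	}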
@@ -1418,36 +1492,27 @@ static void scsi_softirq_done(struct request *rq)
  * Lock status: IO request lock assumed to be held when called.
 */
 static void scsi_request_fn(struct request_queue *q)
+	__releases(q->queue_lock)
+	__acquires(q->queue_lock)
 {
 	struct scsi_device *sdev = q->queuedata;
 	struct Scsi_Host *shost;
 	struct scsi_cmnd *cmd;
 	struct request *req;
 
-	if (!sdev) {
-		printk("scsi: killing requests for dead queue\n");
-		while ((req = elv_next_request(q)) != NULL)
-			scsi_kill_request(req, q);
-		return;
-	}
-
-	if(!get_device(&sdev->sdev_gendev))
-		/* We must be tearing the block queue down already */
-		return;
-
 	/*
 	 * To start with, we keep looping until the queue is empty, or until
 	 * the host is no longer able to accept any more requests.
 	 */
 	shost = sdev->host;
-	while (!blk_queue_plugged(q)) {
+	for (;;) {
 		int rtn;
 		/*
 		 * get next queueable request.  We do this early to make sure
 		 * that the request is fully prepared even if we cannot
 		 * accept it.
 		 */
-		req = elv_next_request(q);
+		req = blk_peek_request(q);
 		if (!req || !scsi_dev_queue_ready(q, sdev))
 			break;
 
@@ -1463,7 +1528,7 @@ static void scsi_request_fn(struct request_queue *q)
 		 * Remove the request from the request list.
 		 */
 		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
-			blkdev_dequeue_request(req);
+			blk_start_request(req);
 		sdev->device_busy++;
 
 		spin_unlock(q->queue_lock);
@@ -1472,20 +1537,34 @@ static void scsi_request_fn(struct request_queue *q)
 			printk(KERN_CRIT "impossible request in %s.\n"
 					 "please mail a stack trace to "
 					 "linux-scsi@vger.kernel.org\n",
-					 __FUNCTION__);
+					 __func__);
 			blk_dump_rq_flags(req, "foo");
 			BUG();
 		}
 		spin_lock(shost->host_lock);
 
-		if (!scsi_host_queue_ready(q, shost, sdev))
+		/*
+		 * We hit this when the driver is using a host wide
+		 * tag map.  For device level tag maps the queue_depth check
+		 * in the device ready fn would prevent us from trying
+		 * to allocate a tag.  Since the map is a shared host resource
+		 * we add the dev to the starved list so it eventually gets
+		 * a run when a tag is freed.
+		 */
+		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
+			if (list_empty(&sdev->starved_entry))
+				list_add_tail(&sdev->starved_entry,
+					      &shost->starved_list);
 			goto not_ready;
-		if (scsi_target(sdev)->single_lun) {
-			if (scsi_target(sdev)->starget_sdev_user &&
-			    scsi_target(sdev)->starget_sdev_user != sdev)
-				goto not_ready;
-			scsi_target(sdev)->starget_sdev_user = sdev;
 		}
+
+		if (!scsi_target_queue_ready(shost, sdev))
+			goto not_ready;
+
+		if (!scsi_host_queue_ready(q, shost, sdev))
+			goto not_ready;
+
+		scsi_target(sdev)->target_busy++;
 		shost->host_busy++;
 
 		/*
@@ -1505,18 +1584,11 @@ static void scsi_request_fn(struct request_queue *q)
 		 */
 		rtn = scsi_dispatch_cmd(cmd);
 		spin_lock_irq(q->queue_lock);
-		if(rtn) {
-			/* we're refusing the command; because of
-			 * the way locks get dropped, we need to
-			 * check here if plugging is required */
-			if(sdev->device_busy == 0)
-				blk_plug_device(q);
-
-			break;
-		}
+		if (rtn)
+			goto out_delay;
 	}
 
-	goto out;
+	return;
 
  not_ready:
 	spin_unlock_irq(shost->host_lock);
@@ -1532,14 +1604,9 @@ static void scsi_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 	blk_requeue_request(q, req);
 	sdev->device_busy--;
-	if(sdev->device_busy == 0)
-		blk_plug_device(q);
- out:
-	/* must be careful here...if we trigger the ->remove() function
-	 * we cannot be holding the q lock */
-	spin_unlock_irq(q->queue_lock);
-	put_device(&sdev->sdev_gendev);
-	spin_lock_irq(q->queue_lock);
+out_delay:
+	if (sdev->device_busy == 0)
+		blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }
 
 u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
@@ -1558,7 +1625,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
 
 	host_dev = scsi_get_device(shost);
 	if (host_dev && host_dev->dma_mask)
-		bounce_limit = *host_dev->dma_mask;
+		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
 
 	return bounce_limit;
 }
@@ -1568,7 +1635,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 					 request_fn_proc *request_fn)
 {
 	struct request_queue *q;
-	struct device *dev = shost->shost_gendev.parent;
+	struct device *dev = shost->dma_dev;
 
 	q = blk_init_queue(request_fn, NULL);
 	if (!q)
@@ -1577,10 +1644,18 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	/*
 	 * this limit is imposed by hardware restrictions
 	 */
-	blk_queue_max_hw_segments(q, shost->sg_tablesize);
-	blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);
+	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
+					SCSI_MAX_SG_CHAIN_SEGMENTS));
+
+	if (scsi_host_prot_dma(shost)) {
+		shost->sg_prot_tablesize =
+			min_not_zero(shost->sg_prot_tablesize,
+				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
+		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
+		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
+	}
 
-	blk_queue_max_sectors(q, shost->max_sectors);
+	blk_queue_max_hw_sectors(q, shost->max_sectors);
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	dma_set_seg_boundary(dev, shost->dma_boundary);
@@ -1588,7 +1663,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
 	if (!shost->use_clustering)
-		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+		q->limits.cluster = 0;
 
 	/*
 	 * set a reasonable default alignment on word boundaries: the
@@ -1610,15 +1685,13 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 		return NULL;
 
 	blk_queue_prep_rq(q, scsi_prep_fn);
+	blk_queue_unprep_rq(q, scsi_unprep_fn);
 	blk_queue_softirq_done(q, scsi_softirq_done);
+	blk_queue_rq_timed_out(q, scsi_times_out);
+	blk_queue_lld_busy(q, scsi_lld_busy);
 	return q;
 }
 
-void scsi_free_queue(struct request_queue *q)
-{
-	blk_cleanup_queue(q);
-}
-
 /*
  * Function:    scsi_block_requests()
 *
@@ -1672,22 +1745,14 @@ int __init scsi_init_queue(void)
 {
 	int i;
 
-	scsi_io_context_cache = kmem_cache_create("scsi_io_context",
-					sizeof(struct scsi_io_context),
-					0, 0, NULL);
-	if (!scsi_io_context_cache) {
-		printk(KERN_ERR "SCSI: can't init scsi io context cache\n");
+	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
+					   sizeof(struct scsi_data_buffer),
+					   0, 0, NULL);
+	if (!scsi_sdb_cache) {
+		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
 		return -ENOMEM;
 	}
 
-	scsi_bidi_sdb_cache = kmem_cache_create("scsi_bidi_sdb",
-					sizeof(struct scsi_data_buffer),
-					0, 0, NULL);
-	if (!scsi_bidi_sdb_cache) {
-		printk(KERN_ERR "SCSI: can't init scsi bidi sdb cache\n");
-		goto cleanup_io_context;
-	}
-
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		int size = sgp->size * sizeof(struct scatterlist);
@@ -1697,7 +1762,7 @@ int __init scsi_init_queue(void)
 		if (!sgp->slab) {
 			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 
 		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
@@ -1705,13 +1770,13 @@ int __init scsi_init_queue(void)
 		if (!sgp->pool) {
 			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
 					sgp->name);
-			goto cleanup_bidi_sdb;
+			goto cleanup_sdb;
 		}
 	}
 
 	return 0;
 
-cleanup_bidi_sdb:
+cleanup_sdb:
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
 		if (sgp->pool)
@@ -1719,9 +1784,7 @@ cleanup_bidi_sdb:
 		if (sgp->slab)
 			kmem_cache_destroy(sgp->slab);
 	}
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
-cleanup_io_context:
-	kmem_cache_destroy(scsi_io_context_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 
 	return -ENOMEM;
 }
@@ -1730,8 +1793,7 @@ void scsi_exit_queue(void)
 {
 	int i;
 
-	kmem_cache_destroy(scsi_io_context_cache);
-	kmem_cache_destroy(scsi_bidi_sdb_cache);
+	kmem_cache_destroy(scsi_sdb_cache);
 
 	for (i = 0; i < SG_MEMPOOL_NR; i++) {
 		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
@@ -1811,7 +1873,7 @@ scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
 	}
 
 	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
-			       sshdr, timeout, retries);
+			       sshdr, timeout, retries, NULL);
 	kfree(real_buffer);
 	return ret;
 }
@@ -1876,7 +1938,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 	memset(buffer, 0, len);
 
 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
-				  sshdr, timeout, retries, NULL);
 
 	/* This code looks awful: what it's doing is making sure an
 	 * ILLEGAL REQUEST sense return identifies the actual command
@@ -1937,8 +1999,7 @@ EXPORT_SYMBOL(scsi_mode_sense);
  *	in.
 *
  *	Returns zero if unsuccessful or an error if TUR failed.  For
- *	removable media, a return of NOT_READY or UNIT_ATTENTION is
- *	translated to success, with the ->changed flag updated.
+ *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/
 int
 scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
@@ -1958,24 +2019,13 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
 	/* try to eat the UNIT_ATTENTION if there are enough retries */
 	do {
 		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
-					  timeout, retries);
-	} while ((driver_byte(result) & DRIVER_SENSE) &&
-		 sshdr && sshdr->sense_key == UNIT_ATTENTION &&
-		 --retries);
-
-	if (!sshdr)
-		/* could not allocate sense buffer, so can't process it */
-		return result;
-
-	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {
-
-		if ((scsi_sense_valid(sshdr)) &&
-		    ((sshdr->sense_key == UNIT_ATTENTION) ||
-		     (sshdr->sense_key == NOT_READY))) {
+					  timeout, retries, NULL);
+		if (sdev->removable && scsi_sense_valid(sshdr) &&
+		    sshdr->sense_key == UNIT_ATTENTION)
 			sdev->changed = 1;
-			result = 0;
-		}
-	}
+	} while (scsi_sense_valid(sshdr) &&
+		 sshdr->sense_key == UNIT_ATTENTION && --retries);
+
 	if (!sshdr_external)
 		kfree(sshdr);
 	return result;
@@ -2000,15 +2050,19 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 
 	switch (state) {
 	case SDEV_CREATED:
-		/* There are no legal states that come back to
-		 * created.  This is the manually initialised start
-		 * state */
-		goto illegal;
+		switch (oldstate) {
+		case SDEV_CREATED_BLOCK:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
 
 	case SDEV_RUNNING:
 		switch (oldstate) {
 		case SDEV_CREATED:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 		case SDEV_QUIESCE:
 		case SDEV_BLOCK:
 			break;
@@ -2021,6 +2075,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		switch (oldstate) {
 		case SDEV_RUNNING:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 			break;
 		default:
 			goto illegal;
@@ -2028,6 +2083,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		break;
 
 	case SDEV_OFFLINE:
+	case SDEV_TRANSPORT_OFFLINE:
 		switch (oldstate) {
 		case SDEV_CREATED:
 		case SDEV_RUNNING:
@@ -2041,8 +2097,17 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 
 	case SDEV_BLOCK:
 		switch (oldstate) {
-		case SDEV_CREATED:
 		case SDEV_RUNNING:
+		case SDEV_CREATED_BLOCK:
+			break;
+		default:
+			goto illegal;
+		}
+		break;
+
+	case SDEV_CREATED_BLOCK:
+		switch (oldstate) {
+		case SDEV_CREATED:
 			break;
 		default:
 			goto illegal;
@@ -2055,6 +2120,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		case SDEV_RUNNING:
 		case SDEV_QUIESCE:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 		case SDEV_BLOCK:
 			break;
 		default:
@@ -2067,7 +2133,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
 		case SDEV_CREATED:
 		case SDEV_RUNNING:
 		case SDEV_OFFLINE:
+		case SDEV_TRANSPORT_OFFLINE:
 		case SDEV_CANCEL:
+		case SDEV_CREATED_BLOCK:
 			break;
 		default:
 			goto illegal;
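Illustration (not part of the patch): the new states slot into the existing legality checks. A hedged fragment showing how the transitions added above behave for a caller:

	scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); /* 0: legal from CREATED */
	scsi_device_set_state(sdev, SDEV_CREATED);	 /* 0: back to start state */
	scsi_device_set_state(sdev, SDEV_RUNNING);	 /* 0: legal from CREATED */
	scsi_device_set_state(sdev, SDEV_CREATED);	 /* -EINVAL: illegal */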
 		break;
@@ -2126,10 +2208,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
 void scsi_evt_thread(struct work_struct *work)
 {
 	struct scsi_device *sdev;
+	enum scsi_device_event evt_type;
 	LIST_HEAD(event_list);
 
 	sdev = container_of(work, struct scsi_device, event_work);
 
+	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
+		if (test_and_clear_bit(evt_type, sdev->pending_events))
+			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
+
 	while (1) {
 		struct scsi_event *evt;
 		struct list_head *this, *tmp;
@@ -2199,6 +2286,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
 	/* evt_type-specific initialization, if any */
 	switch (evt_type) {
 	case SDEV_EVT_MEDIA_CHANGE:
+	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
+	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
+	case SDEV_EVT_LUN_CHANGE_REPORTED:
 	default:
 		/* do nothing */
 		break;
@@ -2270,10 +2362,14 @@ EXPORT_SYMBOL(scsi_device_quiesce);
 *
 *	Must be called with user context, may sleep.
 */
-void
-scsi_device_resume(struct scsi_device *sdev)
+void scsi_device_resume(struct scsi_device *sdev)
 {
-	if(scsi_device_set_state(sdev, SDEV_RUNNING))
+	/* check if the device state was mutated prior to resume, and if
+	 * so assume the state is being managed elsewhere (for example
+	 * device deleted during suspend)
+	 */
+	if (sdev->sdev_state != SDEV_QUIESCE ||
+	    scsi_device_set_state(sdev, SDEV_RUNNING))
 		return;
 	scsi_run_queue(sdev->request_queue);
 }
@@ -2320,7 +2416,6 @@ EXPORT_SYMBOL(scsi_target_resume);
 * (which must be a legal transition).  When the device is in this
 * state, all commands are deferred until the scsi lld reenables
 * the device with scsi_device_unblock or device_block_tmo fires.
- * This routine assumes the host_lock is held on entry.
 */
 int
 scsi_internal_device_block(struct scsi_device *sdev)
@@ -2330,8 +2425,12 @@ scsi_internal_device_block(struct scsi_device *sdev)
 	int err = 0;
 
 	err = scsi_device_set_state(sdev, SDEV_BLOCK);
-	if (err)
-		return err;
+	if (err) {
+		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+		if (err)
+			return err;
+	}
 
 	/*
 	 * The device has transitioned to SDEV_BLOCK.  Stop the
@@ -2349,6 +2448,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
 /**
  * scsi_internal_device_unblock - resume a device after a block request
  * @sdev:	device to resume
+ * @new_state:	state to set devices to after unblocking
  *
  * Called by scsi lld's or the midlayer to restart the device queue
  * for the previously suspended scsi device.  Called from interrupt or
@@ -2358,24 +2458,32 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
- *	(which must be a legal transition) allowing the midlayer to
- *	goose the queue for this device.  This routine assumes the
- *	host_lock is held upon entry.
+ *	or to one of the offline states (which must be a legal transition)
+ *	allowing the midlayer to goose the queue for this device.
 */
 int
-scsi_internal_device_unblock(struct scsi_device *sdev)
+scsi_internal_device_unblock(struct scsi_device *sdev,
+			     enum scsi_device_state new_state)
 {
 	struct request_queue *q = sdev->request_queue;
-	int err;
 	unsigned long flags;
-
-	/*
-	 * Try to transition the scsi device to SDEV_RUNNING
-	 * and goose the device queue if successful.
+
+	/*
+	 * Try to transition the scsi device to SDEV_RUNNING or one of the
+	 * offlined states and goose the device queue if successful.
 	 */
-	err = scsi_device_set_state(sdev, SDEV_RUNNING);
-	if (err)
-		return err;
+	if ((sdev->sdev_state == SDEV_BLOCK) ||
+	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
+		sdev->sdev_state = new_state;
+	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+		if (new_state == SDEV_TRANSPORT_OFFLINE ||
+		    new_state == SDEV_OFFLINE)
+			sdev->sdev_state = new_state;
+		else
+			sdev->sdev_state = SDEV_CREATED;
+	} else if (sdev->sdev_state != SDEV_CANCEL &&
+		 sdev->sdev_state != SDEV_OFFLINE)
+		return -EINVAL;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_start_queue(q);
@@ -2414,26 +2522,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);
 
 static void
 device_unblock(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_unblock(sdev);
+	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
 }
 
 static int
 target_unblock(struct device *dev, void *data)
 {
 	if (scsi_is_target_device(dev))
-		starget_for_each_device(to_scsi_target(dev), NULL,
+		starget_for_each_device(to_scsi_target(dev), data,
 					device_unblock);
 	return 0;
 }
 
 void
-scsi_target_unblock(struct device *dev)
+scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
 {
 	if (scsi_is_target_device(dev))
-		starget_for_each_device(to_scsi_target(dev), NULL,
+		starget_for_each_device(to_scsi_target(dev), &new_state,
 					device_unblock);
 	else
-		device_for_each_child(dev, NULL, target_unblock);
+		device_for_each_child(dev, &new_state, target_unblock);
 }
 EXPORT_SYMBOL_GPL(scsi_target_unblock);
@@ -2466,7 +2574,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 	if (unlikely(i == sg_count)) {
 		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
 			"elements %d\n",
-		       __FUNCTION__, sg_len, *offset, sg_count);
+		       __func__, sg_len, *offset, sg_count);
 		WARN_ON(1);
 		return NULL;
 	}
@@ -2483,7 +2591,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 	if (*len > sg_len)
 		*len = sg_len;
 
-	return kmap_atomic(page, KM_BIO_SRC_IRQ);
+	return kmap_atomic(page);
 }
 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
@@ -2493,6 +2601,20 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);
 */
 void scsi_kunmap_atomic_sg(void *virt)
 {
-	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+	kunmap_atomic(virt);
 }
 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
+
+void sdev_disable_disk_events(struct scsi_device *sdev)
+{
+	atomic_inc(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_disable_disk_events);
+
+void sdev_enable_disk_events(struct scsi_device *sdev)
+{
+	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
+		return;
+	atomic_dec(&sdev->disk_events_disable_depth);
+}
+EXPORT_SYMBOL(sdev_enable_disk_events);
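
For illustration, a minimal sketch (not part of the patch) of how a transport driver might use the reworked block/unblock interface above: scsi_target_block() freezes every device under a target, and scsi_target_unblock() now takes the state to leave devices in, so a transport can park a lost device in SDEV_TRANSPORT_OFFLINE instead of returning it to SDEV_RUNNING. The my_transport_* callbacks are hypothetical names, not functions from this file.

	#include <scsi/scsi_device.h>

	/* Hypothetical link-event callbacks; scsi_target_block() and
	 * scsi_target_unblock() are the midlayer calls changed by this diff. */
	static void my_transport_link_down(struct device *target_dev)
	{
		scsi_target_block(target_dev);	/* defer all commands */
	}

	static void my_transport_link_up(struct device *target_dev)
	{
		scsi_target_unblock(target_dev, SDEV_RUNNING);	/* resume I/O */
	}

	static void my_transport_dev_loss(struct device *target_dev)
	{
		/* device will not return: unblock straight into the new
		 * transport-offline state */
		scsi_target_unblock(target_dev, SDEV_TRANSPORT_OFFLINE);
	}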

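The sdev_disable_disk_events()/sdev_enable_disk_events() pair added at the end of the diff nests through disk_events_disable_depth, letting callers suppress disk event checking (for example media-change polling) across a window where the device cannot answer. A hedged sketch, assuming a hypothetical driver maintenance path:

	#include <scsi/scsi_device.h>

	static int my_maintenance_op(struct scsi_device *sdev)
	{
		return 0;	/* placeholder for the real (hypothetical) work */
	}

	static int my_driver_maintenance(struct scsi_device *sdev)
	{
		int ret;

		/* Each disable must be balanced by exactly one enable; an
		 * unbalanced enable trips the WARN_ON_ONCE added above. */
		sdev_disable_disk_events(sdev);
		ret = my_maintenance_op(sdev);
		sdev_enable_disk_events(sdev);
		return ret;
	}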