Diffstat (limited to 'drivers/scsi/scsi_lib.c')
-rw-r--r--	drivers/scsi/scsi_lib.c | 703
1 file changed, 380 insertions(+), 323 deletions(-)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index eafeeda6e19..3f50dfcb322 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -12,6 +12,7 @@  #include <linux/blkdev.h>  #include <linux/completion.h>  #include <linux/kernel.h> +#include <linux/export.h>  #include <linux/mempool.h>  #include <linux/slab.h>  #include <linux/init.h> @@ -67,29 +68,12 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {  struct kmem_cache *scsi_sdb_cache; -static void scsi_run_queue(struct request_queue *q); -  /* - * Function:	scsi_unprep_request() - * - * Purpose:	Remove all preparation done for a request, including its - *		associated scsi_cmnd, so that it can be requeued. - * - * Arguments:	req	- request to unprepare - * - * Lock status:	Assumed that no locks are held upon entry. - * - * Returns:	Nothing. + * When to reinvoke queueing after a resource shortage. It's 3 msecs to + * not change behaviour from the previous unplug mechanism, experimentation + * may prove this needs changing.   */ -static void scsi_unprep_request(struct request *req) -{ -	struct scsi_cmnd *cmd = req->special; - -	blk_unprep_request(req); -	req->special = NULL; - -	scsi_put_command(cmd); -} +#define SCSI_QUEUE_DELAY	3  /**   * __scsi_queue_insert - private queue insertion @@ -103,7 +87,7 @@ static void scsi_unprep_request(struct request *req)   * for a requeue after completion, which should only occur in this   * file.   */ -static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) +static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)  {  	struct Scsi_Host *host = cmd->device->host;  	struct scsi_device *device = cmd->device; @@ -132,6 +116,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)  		host->host_blocked = host->max_host_blocked;  		break;  	case SCSI_MLQUEUE_DEVICE_BUSY: +	case SCSI_MLQUEUE_EH_RETRY:  		device->device_blocked = device->max_device_blocked;  		break;  	case SCSI_MLQUEUE_TARGET_BUSY: @@ -148,22 +133,15 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)  	/*  	 * Requeue this command.  It will go before all other commands -	 * that are already in the queue. -	 * -	 * NOTE: there is magic here about the way the queue is plugged if -	 * we have no outstanding commands. -	 *  -	 * Although we *don't* plug the queue, we call the request -	 * function.  The SCSI request function detects the blocked condition -	 * and plugs the queue appropriately. -         */ +	 * that are already in the queue. Schedule requeue work under +	 * lock such that the kblockd_schedule_work() call happens +	 * before blk_cleanup_queue() finishes. +	 */ +	cmd->result = 0;  	spin_lock_irqsave(q->queue_lock, flags);  	blk_requeue_request(q, cmd->request); +	kblockd_schedule_work(&device->requeue_work);  	spin_unlock_irqrestore(q->queue_lock, flags); - -	scsi_run_queue(q); - -	return 0;  }  /* @@ -185,9 +163,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)   * Notes:       This could be called either from an interrupt context or a   *              normal process context.   
*/ -int scsi_queue_insert(struct scsi_cmnd *cmd, int reason) +void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)  { -	return __scsi_queue_insert(cmd, reason, 1); +	__scsi_queue_insert(cmd, reason, 1);  }  /**   * scsi_execute - insert request and wait for the result @@ -207,7 +185,7 @@ int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)   */  int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,  		 int data_direction, void *buffer, unsigned bufflen, -		 unsigned char *sense, int timeout, int retries, int flags, +		 unsigned char *sense, int timeout, int retries, u64 flags,  		 int *resid)  {  	struct request *req; @@ -215,6 +193,9 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,  	int ret = DRIVER_ERROR << 24;  	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT); +	if (!req) +		return ret; +	blk_rq_set_block_pc(req);  	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,  					buffer, bufflen, __GFP_WAIT)) @@ -226,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,  	req->sense_len = 0;  	req->retries = retries;  	req->timeout = timeout; -	req->cmd_type = REQ_TYPE_BLOCK_PC;  	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;  	/* @@ -253,11 +233,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,  }  EXPORT_SYMBOL(scsi_execute); - -int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, +int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,  		     int data_direction, void *buffer, unsigned bufflen,  		     struct scsi_sense_hdr *sshdr, int timeout, int retries, -		     int *resid) +		     int *resid, u64 flags)  {  	char *sense = NULL;  	int result; @@ -268,14 +247,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,  			return DRIVER_ERROR << 24;  	}  	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, -			      sense, timeout, retries, 0, resid); +			      sense, timeout, retries, flags, resid);  	if (sshdr)  		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);  	kfree(sense);  	return result;  } -EXPORT_SYMBOL(scsi_execute_req); +EXPORT_SYMBOL(scsi_execute_req_flags);  /*   * Function:    scsi_init_cmd_errh() @@ -385,33 +364,17 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)  	return 0;  } -/* - * Function:	scsi_run_queue() - * - * Purpose:	Select a proper request queue to serve next - * - * Arguments:	q	- last request's queue - * - * Returns:     Nothing - * - * Notes:	The previous command was completely finished, start - *		a new one if possible. - */ -static void scsi_run_queue(struct request_queue *q) +static void scsi_starved_list_run(struct Scsi_Host *shost)  { -	struct scsi_device *sdev = q->queuedata; -	struct Scsi_Host *shost = sdev->host;  	LIST_HEAD(starved_list); +	struct scsi_device *sdev;  	unsigned long flags; -	if (scsi_target(sdev)->single_lun) -		scsi_single_lun_run(sdev); -  	spin_lock_irqsave(shost->host_lock, flags);  	list_splice_init(&shost->starved_list, &starved_list);  	while (!list_empty(&starved_list)) { -		int flagset; +		struct request_queue *slq;  		/*  		 * As long as shost is accepting commands and we have @@ -435,28 +398,65 @@ static void scsi_run_queue(struct request_queue *q)  			continue;  		} -		spin_unlock(shost->host_lock); +		/* +		 * Once we drop the host lock, a racing scsi_remove_device() +		 * call may remove the sdev from the starved list and destroy +		 * it and the queue.  
Mitigate by taking a reference to the +		 * queue and never touching the sdev again after we drop the +		 * host lock.  Note: if __scsi_remove_device() invokes +		 * blk_cleanup_queue() before the queue is run from this +		 * function then blk_run_queue() will return immediately since +		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING. +		 */ +		slq = sdev->request_queue; +		if (!blk_get_queue(slq)) +			continue; +		spin_unlock_irqrestore(shost->host_lock, flags); -		spin_lock(sdev->request_queue->queue_lock); -		flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && -				!test_bit(QUEUE_FLAG_REENTER, -					&sdev->request_queue->queue_flags); -		if (flagset) -			queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); -		__blk_run_queue(sdev->request_queue); -		if (flagset) -			queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); -		spin_unlock(sdev->request_queue->queue_lock); +		blk_run_queue(slq); +		blk_put_queue(slq); -		spin_lock(shost->host_lock); +		spin_lock_irqsave(shost->host_lock, flags);  	}  	/* put any unprocessed entries back */  	list_splice(&starved_list, &shost->starved_list);  	spin_unlock_irqrestore(shost->host_lock, flags); +} + +/* + * Function:   scsi_run_queue() + * + * Purpose:    Select a proper request queue to serve next + * + * Arguments:  q       - last request's queue + * + * Returns:     Nothing + * + * Notes:      The previous command was completely finished, start + *             a new one if possible. + */ +static void scsi_run_queue(struct request_queue *q) +{ +	struct scsi_device *sdev = q->queuedata; + +	if (scsi_target(sdev)->single_lun) +		scsi_single_lun_run(sdev); +	if (!list_empty(&sdev->host->starved_list)) +		scsi_starved_list_run(sdev->host);  	blk_run_queue(q);  } +void scsi_requeue_run_queue(struct work_struct *work) +{ +	struct scsi_device *sdev; +	struct request_queue *q; + +	sdev = container_of(work, struct scsi_device, requeue_work); +	q = sdev->request_queue; +	scsi_run_queue(q); +} +  /*   * Function:	scsi_requeue_command()   * @@ -477,15 +477,20 @@ static void scsi_run_queue(struct request_queue *q)   */  static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)  { +	struct scsi_device *sdev = cmd->device;  	struct request *req = cmd->request;  	unsigned long flags;  	spin_lock_irqsave(q->queue_lock, flags); -	scsi_unprep_request(req); +	blk_unprep_request(req); +	req->special = NULL; +	scsi_put_command(cmd);  	blk_requeue_request(q, req);  	spin_unlock_irqrestore(q->queue_lock, flags);  	scsi_run_queue(q); + +	put_device(&sdev->sdev_gendev);  }  void scsi_next_command(struct scsi_cmnd *cmd) @@ -493,13 +498,9 @@ void scsi_next_command(struct scsi_cmnd *cmd)  	struct scsi_device *sdev = cmd->device;  	struct request_queue *q = sdev->request_queue; -	/* need to hold a reference on the device before we let go of the cmd */ -	get_device(&sdev->sdev_gendev); -  	scsi_put_command(cmd);  	scsi_run_queue(q); -	/* ok to remove device now */  	put_device(&sdev->sdev_gendev);  } @@ -511,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)  		scsi_run_queue(sdev->request_queue);  } -static void __scsi_release_buffers(struct scsi_cmnd *, int); - -/* - * Function:    scsi_end_request() - * - * Purpose:     Post-processing of completed commands (usually invoked at end - *		of upper level post-processing and scsi_io_completion). - * - * Arguments:   cmd	 - command that is complete. - *              error    - 0 if I/O indicates success, < 0 for I/O error. 
- *              bytes    - number of bytes of completed I/O - *		requeue  - indicates whether we should requeue leftovers. - * - * Lock status: Assumed that lock is not held upon entry. - * - * Returns:     cmd if requeue required, NULL otherwise. - * - * Notes:       This is called for block device requests in order to - *              mark some number of sectors as complete. - *  - *		We are guaranteeing that the request queue will be goosed - *		at some point during this call. - * Notes:	If cmd was requeued, upon return it will be a stale pointer. - */ -static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, -					  int bytes, int requeue) -{ -	struct request_queue *q = cmd->device->request_queue; -	struct request *req = cmd->request; - -	/* -	 * If there are blocks left over at the end, set up the command -	 * to queue the remainder of them. -	 */ -	if (blk_end_request(req, error, bytes)) { -		/* kill remainder if no retrys */ -		if (error && scsi_noretry_cmd(cmd)) -			blk_end_request_all(req, error); -		else { -			if (requeue) { -				/* -				 * Bleah.  Leftovers again.  Stick the -				 * leftovers in the front of the -				 * queue, and goose the queue again. -				 */ -				scsi_release_buffers(cmd); -				scsi_requeue_command(q, cmd); -				cmd = NULL; -			} -			return cmd; -		} -	} - -	/* -	 * This will goose the queue request function at the end, so we don't -	 * need to worry about launching another command. -	 */ -	__scsi_release_buffers(cmd, 0); -	scsi_next_command(cmd); -	return NULL; -} -  static inline unsigned int scsi_sgtable_index(unsigned short nents)  {  	unsigned int index; @@ -624,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)  	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);  } -static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) -{ - -	if (cmd->sdb.table.nents) -		scsi_free_sgtable(&cmd->sdb); - -	memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - -	if (do_bidi_check && scsi_bidi_cmnd(cmd)) { -		struct scsi_data_buffer *bidi_sdb = -			cmd->request->next_rq->special; -		scsi_free_sgtable(bidi_sdb); -		kmem_cache_free(scsi_sdb_cache, bidi_sdb); -		cmd->request->next_rq->special = NULL; -	} - -	if (scsi_prot_sg_count(cmd)) -		scsi_free_sgtable(cmd->prot_sdb); -} -  /*   * Function:    scsi_release_buffers()   * - * Purpose:     Completion processing for block device I/O requests. + * Purpose:     Free resources allocate for a scsi_command.   *   * Arguments:   cmd	- command that we are bailing.   * @@ -658,15 +577,75 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)   * Notes:       In the event that an upper level driver rejects a   *		command, we must release resources allocated during   *		the __init_io() function.  Primarily this would involve - *		the scatter-gather table, and potentially any bounce - *		buffers. + *		the scatter-gather table.   
*/  void scsi_release_buffers(struct scsi_cmnd *cmd)  { -	__scsi_release_buffers(cmd, 1); +	if (cmd->sdb.table.nents) +		scsi_free_sgtable(&cmd->sdb); + +	memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + +	if (scsi_prot_sg_count(cmd)) +		scsi_free_sgtable(cmd->prot_sdb);  }  EXPORT_SYMBOL(scsi_release_buffers); +static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) +{ +	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; + +	scsi_free_sgtable(bidi_sdb); +	kmem_cache_free(scsi_sdb_cache, bidi_sdb); +	cmd->request->next_rq->special = NULL; +} + +/** + * __scsi_error_from_host_byte - translate SCSI error code into errno + * @cmd:	SCSI command (unused) + * @result:	scsi error code + * + * Translate SCSI error code into standard UNIX errno. + * Return values: + * -ENOLINK	temporary transport failure + * -EREMOTEIO	permanent target failure, do not retry + * -EBADE	permanent nexus failure, retry on other path + * -ENOSPC	No write space available + * -ENODATA	Medium error + * -EIO		unspecified I/O error + */ +static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result) +{ +	int error = 0; + +	switch(host_byte(result)) { +	case DID_TRANSPORT_FAILFAST: +		error = -ENOLINK; +		break; +	case DID_TARGET_FAILURE: +		set_host_byte(cmd, DID_OK); +		error = -EREMOTEIO; +		break; +	case DID_NEXUS_FAILURE: +		set_host_byte(cmd, DID_OK); +		error = -EBADE; +		break; +	case DID_ALLOC_FAILURE: +		set_host_byte(cmd, DID_OK); +		error = -ENOSPC; +		break; +	case DID_MEDIUM_ERROR: +		set_host_byte(cmd, DID_OK); +		error = -ENODATA; +		break; +	default: +		error = -EIO; +		break; +	} + +	return error; +} +  /*   * Function:    scsi_io_completion()   * @@ -678,16 +657,9 @@ EXPORT_SYMBOL(scsi_release_buffers);   *   * Returns:     Nothing   * - * Notes:       This function is matched in terms of capabilities to - *              the function that created the scatter-gather list. - *              In other words, if there are no bounce buffers - *              (the normal case for most drivers), we don't need - *              the logic to deal with cleaning up afterwards. - * - *		We must call scsi_end_request().  This will finish off - *		the specified number of sectors.  If we are done, the - *		command block will be released and the queue function - *		will be goosed.  If we are not done then we have to + * Notes:       We will finish off the specified number of sectors.  If we + *		are done, the command block will be released and the queue + *		function will be goosed.  If we are not done then we have to   *		figure out what to do next:   *   *		a) We can call scsi_requeue_command().  The request @@ -696,7 +668,7 @@ EXPORT_SYMBOL(scsi_release_buffers);   *		   be used if we made forward progress, or if we want   *		   to switch from READ(10) to READ(6) for example.   * - *		b) We can call scsi_queue_insert().  The request will + *		b) We can call __scsi_queue_insert().  The request will   *		   be put back on the queue and retried using the same   *		   command as before, possibly after a delay.   
* @@ -715,6 +687,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,  	      ACTION_DELAYED_RETRY} action;  	char *description = NULL; +	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;  	if (result) {  		sense_valid = scsi_command_normalize_sense(cmd, &sshdr); @@ -723,7 +696,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  	}  	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ -		req->errors = result;  		if (result) {  			if (sense_valid && req->sense) {  				/* @@ -737,8 +709,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  				req->sense_len = len;  			}  			if (!sense_deferred) -				error = -EIO; +				error = __scsi_error_from_host_byte(cmd, result);  		} +		/* +		 * __scsi_error_from_host_byte may have reset the host_byte +		 */ +		req->errors = cmd->result;  		req->resid_len = scsi_get_resid(cmd); @@ -750,11 +726,21 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  			req->next_rq->resid_len = scsi_in(cmd)->resid;  			scsi_release_buffers(cmd); +			scsi_release_bidi_buffers(cmd); +  			blk_end_request_all(req, 0);  			scsi_next_command(cmd);  			return;  		} +	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { +		/* +		 * Certain non BLOCK_PC requests are commands that don't +		 * actually transfer anything (FLUSH), so cannot use +		 * good_bytes != blk_rq_bytes(req) as the signal for an error. +		 * This sets the error explicitly for the problem case. +		 */ +		error = __scsi_error_from_host_byte(cmd, result);  	}  	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */ @@ -789,14 +775,27 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  	}  	/* -	 * A number of bytes were successfully read.  If there -	 * are leftovers and there is some kind of error -	 * (result != 0), retry the rest. +	 * If we finished all bytes in the request we are done now.  	 */ -	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) -		return; +	if (!blk_end_request(req, error, good_bytes)) +		goto next_command; + +	/* +	 * Kill remainder if no retrys. +	 */ +	if (error && scsi_noretry_cmd(cmd)) { +		blk_end_request_all(req, error); +		goto next_command; +	} + +	/* +	 * If there had been no error, but we have leftover bytes in the +	 * requeues just queue the command up again. 
+	 */ +	if (result == 0) +		goto requeue; -	error = -EIO; +	error = __scsi_error_from_host_byte(cmd, result);  	if (host_byte(result) == DID_RESET) {  		/* Third party bus reset or reset for error recovery @@ -843,6 +842,26 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  				description = "Host Data Integrity Failure";  				action = ACTION_FAIL;  				error = -EILSEQ; +			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ +			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { +				switch (cmd->cmnd[0]) { +				case UNMAP: +					description = "Discard failure"; +					break; +				case WRITE_SAME: +				case WRITE_SAME_16: +					if (cmd->cmnd[1] & 0x8) +						description = "Discard failure"; +					else +						description = +							"Write same failure"; +					break; +				default: +					description = "Invalid command failure"; +					break; +				} +				action = ACTION_FAIL; +				error = -EREMOTEIO;  			} else  				action = ACTION_FAIL;  			break; @@ -893,10 +912,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  		action = ACTION_FAIL;  	} +	if (action != ACTION_FAIL && +	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { +		action = ACTION_FAIL; +		description = "Command timed out"; +	} +  	switch (action) {  	case ACTION_FAIL:  		/* Give up and fail the remainder of the request */ -		scsi_release_buffers(cmd);  		if (!(req->cmd_flags & REQ_QUIET)) {  			if (description)  				scmd_printk(KERN_INFO, cmd, "%s\n", @@ -906,12 +930,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  				scsi_print_sense("", cmd);  			scsi_print_command(cmd);  		} -		if (blk_end_request_err(req, error)) -			scsi_requeue_command(q, cmd); -		else -			scsi_next_command(cmd); -		break; +		if (!blk_end_request_err(req, error)) +			goto next_command; +		/*FALLTHRU*/  	case ACTION_REPREP: +	requeue:  		/* Unprep the request and put it back at the head of the queue.  		 * A new command will be prepared and issued.  		 */ @@ -927,6 +950,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);  		break;  	} +	return; + +next_command: +	scsi_release_buffers(cmd); +	scsi_next_command(cmd);  }  static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, @@ -942,8 +970,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,  		return BLKPREP_DEFER;  	} -	req->buffer = NULL; -  	/*   	 * Next, walk the list, and fill in the addresses and sizes of  	 * each segment. 
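
The __scsi_error_from_host_byte() helper added earlier in this patch centralises how a failed command's host byte is translated into a negative errno before the request is completed, replacing the old blanket -EIO. Below is a small stand-alone sketch of that mapping; the DEMO_* constants and demo_* helpers are placeholders invented for illustration (the real DID_* codes come from the SCSI headers), and the sketch omits the helper's side effect of resetting the host byte to DID_OK for the translated cases.

#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins; the real DID_* codes come from the SCSI headers. */
enum demo_host_code {
	DEMO_DID_TRANSPORT_FAILFAST = 1,
	DEMO_DID_TARGET_FAILURE,
	DEMO_DID_NEXUS_FAILURE,
	DEMO_DID_ALLOC_FAILURE,
	DEMO_DID_MEDIUM_ERROR,
};

/* The host byte conventionally occupies bits 16-23 of cmd->result. */
static int demo_host_byte(int result)
{
	return (result >> 16) & 0xff;
}

/* Mirrors the switch in __scsi_error_from_host_byte(). */
static int demo_error_from_host_byte(int result)
{
	switch (demo_host_byte(result)) {
	case DEMO_DID_TRANSPORT_FAILFAST:
		return -ENOLINK;	/* temporary transport failure */
	case DEMO_DID_TARGET_FAILURE:
		return -EREMOTEIO;	/* permanent target failure, do not retry */
	case DEMO_DID_NEXUS_FAILURE:
		return -EBADE;		/* nexus failure, retry on another path */
	case DEMO_DID_ALLOC_FAILURE:
		return -ENOSPC;		/* no write space available */
	case DEMO_DID_MEDIUM_ERROR:
		return -ENODATA;	/* medium error */
	default:
		return -EIO;		/* unspecified I/O error */
	}
}

int main(void)
{
	int result = DEMO_DID_MEDIUM_ERROR << 16;	/* fabricate a failed command */

	printf("translated error: %d\n", demo_error_from_host_byte(result));
	return 0;
}

Keeping the translation in one place is what allows scsi_io_completion() (and the new BLOCK_PC and zero-length-request paths above) to hand the block layer a meaningful errno, so upper layers can distinguish a permanent target failure (-EREMOTEIO) from a transient transport problem (-ENOLINK) instead of seeing a generic -EIO.
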
@@ -968,6 +994,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,   */  int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)  { +	struct scsi_device *sdev = cmd->device;  	struct request *rq = cmd->request;  	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); @@ -1015,6 +1042,7 @@ err_exit:  	scsi_release_buffers(cmd);  	cmd->request->special = NULL;  	scsi_put_command(cmd); +	put_device(&sdev->sdev_gendev);  	return error;  }  EXPORT_SYMBOL(scsi_init_io); @@ -1025,9 +1053,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,  	struct scsi_cmnd *cmd;  	if (!req->special) { +		/* Bail if we can't get a reference to the device */ +		if (!get_device(&sdev->sdev_gendev)) +			return NULL; +  		cmd = scsi_get_command(sdev, GFP_ATOMIC); -		if (unlikely(!cmd)) +		if (unlikely(!cmd)) { +			put_device(&sdev->sdev_gendev);  			return NULL; +		}  		req->special = cmd;  	} else {  		cmd = req->special; @@ -1038,21 +1072,14 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,  	cmd->request = req;  	cmd->cmnd = req->cmd; +	cmd->prot_op = SCSI_PROT_NORMAL;  	return cmd;  }  int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)  { -	struct scsi_cmnd *cmd; -	int ret = scsi_prep_state_check(sdev, req); - -	if (ret != BLKPREP_OK) -		return ret; - -	cmd = scsi_get_cmd_from_req(sdev, req); -	if (unlikely(!cmd)) -		return BLKPREP_DEFER; +	struct scsi_cmnd *cmd = req->special;  	/*  	 * BLOCK_PC requests may transfer data, in which case they must @@ -1072,7 +1099,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)  		BUG_ON(blk_rq_bytes(req));  		memset(&cmd->sdb, 0, sizeof(cmd->sdb)); -		req->buffer = NULL;  	}  	cmd->cmd_len = req->cmd_len; @@ -1096,15 +1122,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);   */  int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)  { -	struct scsi_cmnd *cmd; -	int ret = scsi_prep_state_check(sdev, req); - -	if (ret != BLKPREP_OK) -		return ret; +	struct scsi_cmnd *cmd = req->special;  	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh  			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) { -		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); +		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);  		if (ret != BLKPREP_OK)  			return ret;  	} @@ -1114,16 +1136,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)  	 */  	BUG_ON(!req->nr_phys_segments); -	cmd = scsi_get_cmd_from_req(sdev, req); -	if (unlikely(!cmd)) -		return BLKPREP_DEFER; -  	memset(cmd->cmnd, 0, BLK_MAX_CDB);  	return scsi_init_io(cmd, GFP_ATOMIC);  }  EXPORT_SYMBOL(scsi_setup_fs_cmnd); -int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) +static int +scsi_prep_state_check(struct scsi_device *sdev, struct request *req)  {  	int ret = BLKPREP_OK; @@ -1134,6 +1153,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)  	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {  		switch (sdev->sdev_state) {  		case SDEV_OFFLINE: +		case SDEV_TRANSPORT_OFFLINE:  			/*  			 * If the device is offline we refuse to process any  			 * commands.  
The device must be brought online @@ -1174,9 +1194,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)  	}  	return ret;  } -EXPORT_SYMBOL(scsi_prep_state_check); -int scsi_prep_return(struct request_queue *q, struct request *req, int ret) +static int +scsi_prep_return(struct request_queue *q, struct request *req, int ret)  {  	struct scsi_device *sdev = q->queuedata; @@ -1188,17 +1208,18 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)  			struct scsi_cmnd *cmd = req->special;  			scsi_release_buffers(cmd);  			scsi_put_command(cmd); +			put_device(&sdev->sdev_gendev);  			req->special = NULL;  		}  		break;  	case BLKPREP_DEFER:  		/*  		 * If we defer, the blk_peek_request() returns NULL, but the -		 * queue must be restarted, so we plug here if no returning -		 * command will automatically do that. +		 * queue must be restarted, so we schedule a callback to happen +		 * shortly.  		 */  		if (sdev->device_busy == 0) -			blk_plug_device(q); +			blk_delay_queue(q, SCSI_QUEUE_DELAY);  		break;  	default:  		req->cmd_flags |= REQ_DONTPREP; @@ -1206,18 +1227,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)  	return ret;  } -EXPORT_SYMBOL(scsi_prep_return); -int scsi_prep_fn(struct request_queue *q, struct request *req) +static int scsi_prep_fn(struct request_queue *q, struct request *req)  {  	struct scsi_device *sdev = q->queuedata; -	int ret = BLKPREP_KILL; +	struct scsi_cmnd *cmd; +	int ret; + +	ret = scsi_prep_state_check(sdev, req); +	if (ret != BLKPREP_OK) +		goto out; -	if (req->cmd_type == REQ_TYPE_BLOCK_PC) +	cmd = scsi_get_cmd_from_req(sdev, req); +	if (unlikely(!cmd)) { +		ret = BLKPREP_DEFER; +		goto out; +	} + +	if (req->cmd_type == REQ_TYPE_FS) +		ret = scsi_cmd_to_driver(cmd)->init_command(cmd); +	else if (req->cmd_type == REQ_TYPE_BLOCK_PC)  		ret = scsi_setup_blk_pc_cmnd(sdev, req); +	else +		ret = BLKPREP_KILL; + +out:  	return scsi_prep_return(q, req, ret);  } -EXPORT_SYMBOL(scsi_prep_fn); + +static void scsi_unprep_fn(struct request_queue *q, struct request *req) +{ +	if (req->cmd_type == REQ_TYPE_FS) { +		struct scsi_cmnd *cmd = req->special; +		struct scsi_driver *drv = scsi_cmd_to_driver(cmd); + +		if (drv->uninit_command) +			drv->uninit_command(cmd); +	} +}  /*   * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else @@ -1237,7 +1284,7 @@ static inline int scsi_dev_queue_ready(struct request_queue *q,  				   sdev_printk(KERN_INFO, sdev,  				   "unblocking device at zero depth\n"));  		} else { -			blk_plug_device(q); +			blk_delay_queue(q, SCSI_QUEUE_DELAY);  			return 0;  		}  	} @@ -1278,16 +1325,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,  	}  	if (scsi_target_is_busy(starget)) { -		if (list_empty(&sdev->starved_entry)) { -			list_add_tail(&sdev->starved_entry, -				      &shost->starved_list); -			return 0; -		} +		list_move_tail(&sdev->starved_entry, &shost->starved_list); +		return 0;  	} -	/* We're OK to process the command, so we can't be starved */ -	if (!list_empty(&sdev->starved_entry)) -		list_del_init(&sdev->starved_entry);  	return 1;  } @@ -1337,24 +1378,27 @@ static inline int scsi_host_queue_ready(struct request_queue *q,   * may be changed after request stacking drivers call the function,   * regardless of taking lock or not.   * - * When scsi can't dispatch I/Os anymore and needs to kill I/Os - * (e.g. !sdev), scsi needs to return 'not busy'. - * Otherwise, request stacking drivers may hold requests forever. 
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi + * needs to return 'not busy'. Otherwise, request stacking drivers + * may hold requests forever.   */  static int scsi_lld_busy(struct request_queue *q)  {  	struct scsi_device *sdev = q->queuedata;  	struct Scsi_Host *shost; -	struct scsi_target *starget; -	if (!sdev) +	if (blk_queue_dying(q))  		return 0;  	shost = sdev->host; -	starget = scsi_target(sdev); -	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || -	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) +	/* +	 * Ignore host/starget busy state. +	 * Since block layer does not have a concept of fairness across +	 * multiple queues, congestion of host/starget needs to be handled +	 * in SCSI layer. +	 */ +	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))  		return 1;  	return 0; @@ -1372,6 +1416,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)  	blk_start_request(req); +	scmd_printk(KERN_INFO, cmd, "killing request\n"); +  	sdev = cmd->device;  	starget = scsi_target(sdev);  	shost = sdev->host; @@ -1403,11 +1449,6 @@ static void scsi_softirq_done(struct request *rq)  	INIT_LIST_HEAD(&cmd->eh_entry); -	/* -	 * Set the serial numbers back to zero -	 */ -	cmd->serial_number = 0; -  	atomic_inc(&cmd->device->iodone_cnt);  	if (cmd->result)  		atomic_inc(&cmd->device->ioerr_cnt); @@ -1451,29 +1492,20 @@ static void scsi_softirq_done(struct request *rq)   * Lock status: IO request lock assumed to be held when called.   */  static void scsi_request_fn(struct request_queue *q) +	__releases(q->queue_lock) +	__acquires(q->queue_lock)  {  	struct scsi_device *sdev = q->queuedata;  	struct Scsi_Host *shost;  	struct scsi_cmnd *cmd;  	struct request *req; -	if (!sdev) { -		printk("scsi: killing requests for dead queue\n"); -		while ((req = blk_peek_request(q)) != NULL) -			scsi_kill_request(req, q); -		return; -	} - -	if(!get_device(&sdev->sdev_gendev)) -		/* We must be tearing the block queue down already */ -		return; -  	/*  	 * To start with, we keep looping until the queue is empty, or until  	 * the host is no longer able to accept any more requests.  	 */  	shost = sdev->host; -	while (!blk_queue_plugged(q)) { +	for (;;) {  		int rtn;  		/*  		 * get next queueable request.  
We do this early to make sure @@ -1552,18 +1584,11 @@ static void scsi_request_fn(struct request_queue *q)  		 */  		rtn = scsi_dispatch_cmd(cmd);  		spin_lock_irq(q->queue_lock); -		if(rtn) { -			/* we're refusing the command; because of -			 * the way locks get dropped, we need to  -			 * check here if plugging is required */ -			if(sdev->device_busy == 0) -				blk_plug_device(q); - -			break; -		} +		if (rtn) +			goto out_delay;  	} -	goto out; +	return;   not_ready:  	spin_unlock_irq(shost->host_lock); @@ -1579,14 +1604,9 @@ static void scsi_request_fn(struct request_queue *q)  	spin_lock_irq(q->queue_lock);  	blk_requeue_request(q, req);  	sdev->device_busy--; -	if(sdev->device_busy == 0) -		blk_plug_device(q); - out: -	/* must be careful here...if we trigger the ->remove() function -	 * we cannot be holding the q lock */ -	spin_unlock_irq(q->queue_lock); -	put_device(&sdev->sdev_gendev); -	spin_lock_irq(q->queue_lock); +out_delay: +	if (sdev->device_busy == 0) +		blk_delay_queue(q, SCSI_QUEUE_DELAY);  }  u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) @@ -1605,7 +1625,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)  	host_dev = scsi_get_device(shost);  	if (host_dev && host_dev->dma_mask) -		bounce_limit = *host_dev->dma_mask; +		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;  	return bounce_limit;  } @@ -1615,7 +1635,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,  					 request_fn_proc *request_fn)  {  	struct request_queue *q; -	struct device *dev = shost->shost_gendev.parent; +	struct device *dev = shost->dma_dev;  	q = blk_init_queue(request_fn, NULL);  	if (!q) @@ -1642,9 +1662,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,  	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); -	/* New queue, no concurrency on queue_flags */  	if (!shost->use_clustering) -		queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); +		q->limits.cluster = 0;  	/*  	 * set a reasonable default alignment on word boundaries: the @@ -1666,17 +1685,13 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)  		return NULL;  	blk_queue_prep_rq(q, scsi_prep_fn); +	blk_queue_unprep_rq(q, scsi_unprep_fn);  	blk_queue_softirq_done(q, scsi_softirq_done);  	blk_queue_rq_timed_out(q, scsi_times_out);  	blk_queue_lld_busy(q, scsi_lld_busy);  	return q;  } -void scsi_free_queue(struct request_queue *q) -{ -	blk_cleanup_queue(q); -} -  /*   * Function:    scsi_block_requests()   * @@ -1984,8 +1999,7 @@ EXPORT_SYMBOL(scsi_mode_sense);   *		in.   *   *	Returns zero if unsuccessful or an error if TUR failed.  For - *	removable media, a return of NOT_READY or UNIT_ATTENTION is - *	translated to success, with the ->changed flag updated. + *	removable media, UNIT_ATTENTION sets ->changed flag.   
**/  int  scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, @@ -2012,16 +2026,6 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,  	} while (scsi_sense_valid(sshdr) &&  		 sshdr->sense_key == UNIT_ATTENTION && --retries); -	if (!sshdr) -		/* could not allocate sense buffer, so can't process it */ -		return result; - -	if (sdev->removable && scsi_sense_valid(sshdr) && -	    (sshdr->sense_key == UNIT_ATTENTION || -	     sshdr->sense_key == NOT_READY)) { -		sdev->changed = 1; -		result = 0; -	}  	if (!sshdr_external)  		kfree(sshdr);  	return result; @@ -2058,6 +2062,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)  		switch (oldstate) {  		case SDEV_CREATED:  		case SDEV_OFFLINE: +		case SDEV_TRANSPORT_OFFLINE:  		case SDEV_QUIESCE:  		case SDEV_BLOCK:  			break; @@ -2070,6 +2075,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)  		switch (oldstate) {  		case SDEV_RUNNING:  		case SDEV_OFFLINE: +		case SDEV_TRANSPORT_OFFLINE:  			break;  		default:  			goto illegal; @@ -2077,6 +2083,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)  		break;  	case SDEV_OFFLINE: +	case SDEV_TRANSPORT_OFFLINE:  		switch (oldstate) {  		case SDEV_CREATED:  		case SDEV_RUNNING: @@ -2113,6 +2120,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)  		case SDEV_RUNNING:  		case SDEV_QUIESCE:  		case SDEV_OFFLINE: +		case SDEV_TRANSPORT_OFFLINE:  		case SDEV_BLOCK:  			break;  		default: @@ -2125,7 +2133,9 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)  		case SDEV_CREATED:  		case SDEV_RUNNING:  		case SDEV_OFFLINE: +		case SDEV_TRANSPORT_OFFLINE:  		case SDEV_CANCEL: +		case SDEV_CREATED_BLOCK:  			break;  		default:  			goto illegal; @@ -2163,7 +2173,21 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)  	case SDEV_EVT_MEDIA_CHANGE:  		envp[idx++] = "SDEV_MEDIA_CHANGE=1";  		break; - +	case SDEV_EVT_INQUIRY_CHANGE_REPORTED: +		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED"; +		break; +	case SDEV_EVT_CAPACITY_CHANGE_REPORTED: +		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED"; +		break; +	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: +	       envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED"; +		break; +	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: +		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED"; +		break; +	case SDEV_EVT_LUN_CHANGE_REPORTED: +		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED"; +		break;  	default:  		/* do nothing */  		break; @@ -2184,10 +2208,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)  void scsi_evt_thread(struct work_struct *work)  {  	struct scsi_device *sdev; +	enum scsi_device_event evt_type;  	LIST_HEAD(event_list);  	sdev = container_of(work, struct scsi_device, event_work); +	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) +		if (test_and_clear_bit(evt_type, sdev->pending_events)) +			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); +  	while (1) {  		struct scsi_event *evt;  		struct list_head *this, *tmp; @@ -2257,6 +2286,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,  	/* evt_type-specific initialization, if any */  	switch (evt_type) {  	case SDEV_EVT_MEDIA_CHANGE: +	case SDEV_EVT_INQUIRY_CHANGE_REPORTED: +	case SDEV_EVT_CAPACITY_CHANGE_REPORTED: +	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: +	case 
SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: +	case SDEV_EVT_LUN_CHANGE_REPORTED:  	default:  		/* do nothing */  		break; @@ -2328,10 +2362,14 @@ EXPORT_SYMBOL(scsi_device_quiesce);   *   *	Must be called with user context, may sleep.   */ -void -scsi_device_resume(struct scsi_device *sdev) +void scsi_device_resume(struct scsi_device *sdev)  { -	if(scsi_device_set_state(sdev, SDEV_RUNNING)) +	/* check if the device state was mutated prior to resume, and if +	 * so assume the state is being managed elsewhere (for example +	 * device deleted during suspend) +	 */ +	if (sdev->sdev_state != SDEV_QUIESCE || +	    scsi_device_set_state(sdev, SDEV_RUNNING))  		return;  	scsi_run_queue(sdev->request_queue);  } @@ -2378,7 +2416,6 @@ EXPORT_SYMBOL(scsi_target_resume);   *	(which must be a legal transition).  When the device is in this   *	state, all commands are deferred until the scsi lld reenables   *	the device with scsi_device_unblock or device_block_tmo fires. - *	This routine assumes the host_lock is held on entry.   */  int  scsi_internal_device_block(struct scsi_device *sdev) @@ -2411,6 +2448,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);  /**   * scsi_internal_device_unblock - resume a device after a block request   * @sdev:	device to resume + * @new_state:	state to set devices to after unblocking   *   * Called by scsi lld's or the midlayer to restart the device queue   * for the previously suspended scsi device.  Called from interrupt or @@ -2420,25 +2458,30 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);   *   * Notes:          *	This routine transitions the device to the SDEV_RUNNING state - *	(which must be a legal transition) allowing the midlayer to - *	goose the queue for this device.  This routine assumes the  - *	host_lock is held upon entry. + *	or to one of the offline states (which must be a legal transition) + *	allowing the midlayer to goose the queue for this device.   */  int -scsi_internal_device_unblock(struct scsi_device *sdev) +scsi_internal_device_unblock(struct scsi_device *sdev, +			     enum scsi_device_state new_state)  {  	struct request_queue *q = sdev->request_queue;   	unsigned long flags; -	 -	/*  -	 * Try to transition the scsi device to SDEV_RUNNING -	 * and goose the device queue if successful.   + +	/* +	 * Try to transition the scsi device to SDEV_RUNNING or one of the +	 * offlined states and goose the device queue if successful.  	 
*/ -	if (sdev->sdev_state == SDEV_BLOCK) -		sdev->sdev_state = SDEV_RUNNING; -	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) -		sdev->sdev_state = SDEV_CREATED; -	else if (sdev->sdev_state != SDEV_CANCEL && +	if ((sdev->sdev_state == SDEV_BLOCK) || +	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE)) +		sdev->sdev_state = new_state; +	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) { +		if (new_state == SDEV_TRANSPORT_OFFLINE || +		    new_state == SDEV_OFFLINE) +			sdev->sdev_state = new_state; +		else +			sdev->sdev_state = SDEV_CREATED; +	} else if (sdev->sdev_state != SDEV_CANCEL &&  		 sdev->sdev_state != SDEV_OFFLINE)  		return -EINVAL; @@ -2479,26 +2522,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);  static void  device_unblock(struct scsi_device *sdev, void *data)  { -	scsi_internal_device_unblock(sdev); +	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);  }  static int  target_unblock(struct device *dev, void *data)  {  	if (scsi_is_target_device(dev)) -		starget_for_each_device(to_scsi_target(dev), NULL, +		starget_for_each_device(to_scsi_target(dev), data,  					device_unblock);  	return 0;  }  void -scsi_target_unblock(struct device *dev) +scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)  {  	if (scsi_is_target_device(dev)) -		starget_for_each_device(to_scsi_target(dev), NULL, +		starget_for_each_device(to_scsi_target(dev), &new_state,  					device_unblock);  	else -		device_for_each_child(dev, NULL, target_unblock); +		device_for_each_child(dev, &new_state, target_unblock);  }  EXPORT_SYMBOL_GPL(scsi_target_unblock); @@ -2548,7 +2591,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,  	if (*len > sg_len)  		*len = sg_len; -	return kmap_atomic(page, KM_BIO_SRC_IRQ); +	return kmap_atomic(page);  }  EXPORT_SYMBOL(scsi_kmap_atomic_sg); @@ -2558,6 +2601,20 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);   */  void scsi_kunmap_atomic_sg(void *virt)  { -	kunmap_atomic(virt, KM_BIO_SRC_IRQ); +	kunmap_atomic(virt);  }  EXPORT_SYMBOL(scsi_kunmap_atomic_sg); + +void sdev_disable_disk_events(struct scsi_device *sdev) +{ +	atomic_inc(&sdev->disk_events_disable_depth); +} +EXPORT_SYMBOL(sdev_disable_disk_events); + +void sdev_enable_disk_events(struct scsi_device *sdev) +{ +	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) +		return; +	atomic_dec(&sdev->disk_events_disable_depth); +} +EXPORT_SYMBOL(sdev_enable_disk_events);  | 
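
The sdev_disable_disk_events()/sdev_enable_disk_events() pair added at the end of the patch implements a nesting disable counter: each disable bumps sdev->disk_events_disable_depth, each enable decrements it, and WARN_ON_ONCE() fires on an unbalanced enable. The sketch below shows the same pattern in stand-alone user-space C with C11 atomics; the demo_* names are invented for illustration, while the kernel helpers operate on atomic_t with atomic_inc()/atomic_dec().

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the disk_events_disable_depth field of struct scsi_device. */
struct demo_device {
	atomic_int events_disable_depth;
};

static void demo_disable_events(struct demo_device *dev)
{
	atomic_fetch_add(&dev->events_disable_depth, 1);
}

static void demo_enable_events(struct demo_device *dev)
{
	/* Mirrors the WARN_ON_ONCE() guard against unbalanced enables. */
	if (atomic_load(&dev->events_disable_depth) <= 0) {
		fprintf(stderr, "unbalanced events enable\n");
		return;
	}
	atomic_fetch_sub(&dev->events_disable_depth, 1);
}

/* An event-polling path would check the depth before doing any work. */
static bool demo_events_allowed(struct demo_device *dev)
{
	return atomic_load(&dev->events_disable_depth) == 0;
}

int main(void)
{
	struct demo_device dev = { .events_disable_depth = 0 };

	demo_disable_events(&dev);	/* e.g. around a firmware download */
	demo_disable_events(&dev);	/* nesting from an independent caller */
	assert(!demo_events_allowed(&dev));

	demo_enable_events(&dev);
	demo_enable_events(&dev);
	assert(demo_events_allowed(&dev));

	demo_enable_events(&dev);	/* unbalanced: warns and is ignored */
	return 0;
}

A depth counter rather than a boolean lets unrelated callers stack their disable requests without coordinating; event delivery resumes only once every caller has re-enabled it.
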