Diffstat (limited to 'drivers/scsi')
302 files changed, 27163 insertions, 25017 deletions
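Most of the host-template changes in this series (3w-9xxx, 3w-sas, 3w-xxxx, aacraid) simply add .no_write_same = 1 so the SCSI midlayer never issues WRITE SAME to firmware-emulated RAID volumes. The more involved BusLogic change further down adds error handling to the sense-buffer DMA mapping in blogic_qcmd_lck(). Below is a condensed sketch of that pattern, assuming the 3.x-era PCI DMA API; the function name example_map_sense is illustrative and not from the patch, while the struct and field names mirror BusLogic.c:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "BusLogic.h"

/* Illustrative fragment: map a command's sense buffer for DMA and verify
 * the mapping before storing it in the driver's CCB, as the hunk below
 * does. */
static int example_map_sense(struct blogic_adapter *adapter,
			     struct blogic_ccb *ccb,
			     struct scsi_cmnd *command)
{
	dma_addr_t sense_buf;

	ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
	ccb->command = command;
	sense_buf = pci_map_single(adapter->pci_device,
				   command->sense_buffer,
				   ccb->sense_datalen,
				   PCI_DMA_FROMDEVICE);
	/* pci_map_single() can fail (e.g. IOMMU exhaustion); the returned
	 * dma_addr_t must be checked with dma_mapping_error() before the
	 * hardware ever sees it. */
	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf))
		return SCSI_MLQUEUE_HOST_BUSY;	/* midlayer will retry */
	ccb->sensedata = sense_buf;
	return 0;
}

Returning SCSI_MLQUEUE_HOST_BUSY makes the midlayer requeue the command rather than fail it, the conventional response to a transient resource shortage.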
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 5e1e12c0cf4..0a7325361d2 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2025,7 +2025,8 @@ static struct scsi_host_template driver_template = {  	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,  	.use_clustering		= ENABLE_CLUSTERING,  	.shost_attrs		= twa_host_attrs, -	.emulated		= 1 +	.emulated		= 1, +	.no_write_same		= 1,  };  /* This function will probe and initialize a card */ diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index c845bdbeb6c..4de346017e9 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1600,7 +1600,8 @@ static struct scsi_host_template driver_template = {  	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,  	.use_clustering		= ENABLE_CLUSTERING,  	.shost_attrs		= twl_host_attrs, -	.emulated		= 1 +	.emulated		= 1, +	.no_write_same		= 1,  };  /* This function will probe and initialize a card */ diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index b9276d10b25..752624e6bc0 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -2279,7 +2279,8 @@ static struct scsi_host_template driver_template = {  	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,	  	.use_clustering		= ENABLE_CLUSTERING,  	.shost_attrs		= tw_host_attrs, -	.emulated		= 1 +	.emulated		= 1, +	.no_write_same		= 1,  };  /* This function will probe and initialize a card */ diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index feab3a5e50b..972f8176665 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -26,8 +26,8 @@  */ -#define blogic_drvr_version		"2.1.16" -#define blogic_drvr_date		"18 July 2002" +#define blogic_drvr_version		"2.1.17" +#define blogic_drvr_date		"12 September 2013"  #include <linux/module.h>  #include <linux/init.h> @@ -311,12 +311,14 @@ static struct blogic_ccb *blogic_alloc_ccb(struct blogic_adapter *adapter)    caller.  */ -static void blogic_dealloc_ccb(struct blogic_ccb *ccb) +static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)  {  	struct blogic_adapter *adapter = ccb->adapter; -	scsi_dma_unmap(ccb->command); -	pci_unmap_single(adapter->pci_device, ccb->sensedata, +	if (ccb->command != NULL) +		scsi_dma_unmap(ccb->command); +	if (dma_unmap) +		pci_unmap_single(adapter->pci_device, ccb->sensedata,  			 ccb->sense_datalen, PCI_DMA_FROMDEVICE);  	ccb->command = NULL; @@ -696,7 +698,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)  	while ((pci_device = pci_get_device(PCI_VENDOR_ID_BUSLOGIC,  					PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,  					pci_device)) != NULL) { -		struct blogic_adapter *adapter = adapter; +		struct blogic_adapter *host_adapter = adapter;  		struct blogic_adapter_info adapter_info;  		enum blogic_isa_ioport mod_ioaddr_req;  		unsigned char bus; @@ -744,9 +746,9 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)  		   known and enabled, note that the particular Standard ISA I/O  		   Address should not be probed.  		 */ -		adapter->io_addr = io_addr; -		blogic_intreset(adapter); -		if (blogic_cmd(adapter, BLOGIC_INQ_PCI_INFO, NULL, 0, +		host_adapter->io_addr = io_addr; +		blogic_intreset(host_adapter); +		if (blogic_cmd(host_adapter, BLOGIC_INQ_PCI_INFO, NULL, 0,  				&adapter_info, sizeof(adapter_info)) ==  				sizeof(adapter_info)) {  			if (adapter_info.isa_port < 6) @@ -762,7 +764,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)  		   I/O Address assigned at system initialization.  		 
*/  		mod_ioaddr_req = BLOGIC_IO_DISABLE; -		blogic_cmd(adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req, +		blogic_cmd(host_adapter, BLOGIC_MOD_IOADDR, &mod_ioaddr_req,  				sizeof(mod_ioaddr_req), NULL, 0);  		/*  		   For the first MultiMaster Host Adapter enumerated, @@ -779,12 +781,12 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)  			fetch_localram.offset = BLOGIC_AUTOSCSI_BASE + 45;  			fetch_localram.count = sizeof(autoscsi_byte45); -			blogic_cmd(adapter, BLOGIC_FETCH_LOCALRAM, +			blogic_cmd(host_adapter, BLOGIC_FETCH_LOCALRAM,  					&fetch_localram, sizeof(fetch_localram),  					&autoscsi_byte45,  					sizeof(autoscsi_byte45)); -			blogic_cmd(adapter, BLOGIC_GET_BOARD_ID, NULL, 0, &id, -					sizeof(id)); +			blogic_cmd(host_adapter, BLOGIC_GET_BOARD_ID, NULL, 0, +					&id, sizeof(id));  			if (id.fw_ver_digit1 == '5')  				force_scan_order =  					autoscsi_byte45.force_scan_order; @@ -2762,8 +2764,8 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)  			/*  			   Place CCB back on the Host Adapter's free list.  			 */ -			blogic_dealloc_ccb(ccb); -#if 0				/* this needs to be redone different for new EH */ +			blogic_dealloc_ccb(ccb, 1); +#if 0			/* this needs to be redone different for new EH */  			/*  			   Bus Device Reset CCBs have the command field  			   non-NULL only when a Bus Device Reset was requested @@ -2791,7 +2793,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)  				if (ccb->status == BLOGIC_CCB_RESET &&  						ccb->tgt_id == tgt_id) {  					command = ccb->command; -					blogic_dealloc_ccb(ccb); +					blogic_dealloc_ccb(ccb, 1);  					adapter->active_cmds[tgt_id]--;  					command->result = DID_RESET << 16;  					command->scsi_done(command); @@ -2862,7 +2864,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)  			/*  			   Place CCB back on the Host Adapter's free list.  			 */ -			blogic_dealloc_ccb(ccb); +			blogic_dealloc_ccb(ccb, 1);  			/*  			   Call the SCSI Command Completion Routine.  			 
*/ @@ -3034,6 +3036,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,  	int buflen = scsi_bufflen(command);  	int count;  	struct blogic_ccb *ccb; +	dma_addr_t sense_buf;  	/*  	   SCSI REQUEST_SENSE commands will be executed automatically by the @@ -3179,10 +3182,17 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,  	}  	memcpy(ccb->cdb, cdb, cdblen);  	ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE; -	ccb->sensedata = pci_map_single(adapter->pci_device, +	ccb->command = command; +	sense_buf = pci_map_single(adapter->pci_device,  				command->sense_buffer, ccb->sense_datalen,  				PCI_DMA_FROMDEVICE); -	ccb->command = command; +	if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) { +		blogic_err("DMA mapping for sense data buffer failed\n", +				adapter); +		blogic_dealloc_ccb(ccb, 0); +		return SCSI_MLQUEUE_HOST_BUSY; +	} +	ccb->sensedata = sense_buf;  	command->scsi_done = comp_cb;  	if (blogic_multimaster_type(adapter)) {  		/* @@ -3203,7 +3213,7 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,  			if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START,  						ccb)) {  				blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter); -				blogic_dealloc_ccb(ccb); +				blogic_dealloc_ccb(ccb, 1);  				command->result = DID_ERROR << 16;  				command->scsi_done(command);  			} @@ -3337,7 +3347,7 @@ static int blogic_resetadapter(struct blogic_adapter *adapter, bool hard_reset)  	for (ccb = adapter->all_ccbs; ccb != NULL; ccb = ccb->next_all)  		if (ccb->status == BLOGIC_CCB_ACTIVE) -			blogic_dealloc_ccb(ccb); +			blogic_dealloc_ccb(ccb, 1);  	/*  	 * Wait a few seconds between the Host Adapter Hard Reset which  	 * initiates a SCSI Bus Reset and issuing any SCSI Commands.  Some diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index fe25677a551..baca5897039 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -204,9 +204,9 @@ config SCSI_MULTI_LUN  	  Some devices support more than one LUN (Logical Unit Number) in order  	  to allow access to several media, e.g. CD jukebox, USB card reader,  	  mobile phone in mass storage mode. This option forces the kernel to -	  probe for all LUNs by default. This setting can be overriden by +	  probe for all LUNs by default. This setting can be overridden by  	  max_luns boot/module parameter. Note that this option does not affect -	  devices conforming to SCSI-3 or higher as they can explicitely report +	  devices conforming to SCSI-3 or higher as they can explicitly report  	  their number of LUNs. It is safe to say Y here unless you have one of  	  those rare devices which reacts in an unexpected way when probed for  	  multiple LUNs. @@ -263,6 +263,9 @@ config SCSI_SCAN_ASYNC  	  You can override this choice by specifying "scsi_mod.scan=sync"  	  or async on the kernel's command line. +	  Note that this setting also affects whether resuming from +	  system suspend will be performed asynchronously. +  menu "SCSI Transports"  	depends on SCSI @@ -499,47 +502,6 @@ config SCSI_AACRAID  source "drivers/scsi/aic7xxx/Kconfig.aic7xxx" - -config SCSI_AIC7XXX_OLD -	tristate "Adaptec AIC7xxx support (old driver)" -	depends on (ISA || EISA || PCI ) && SCSI -	help -	  WARNING This driver is an older aic7xxx driver and is no longer -	  under active development.  Adaptec, Inc. is writing a new driver to -	  take the place of this one, and it is recommended that whenever -	  possible, people should use the new Adaptec written driver instead -	  of this one.  
This driver will eventually be phased out entirely. - -	  This is support for the various aic7xxx based Adaptec SCSI -	  controllers. These include the 274x EISA cards; 284x VLB cards; -	  2902, 2910, 293x, 294x, 394x, 3985 and several other PCI and -	  motherboard based SCSI controllers from Adaptec. It does not support -	  the AAA-13x RAID controllers from Adaptec, nor will it likely ever -	  support them. It does not support the 2920 cards from Adaptec that -	  use the Future Domain SCSI controller chip. For those cards, you -	  need the "Future Domain 16xx SCSI support" driver. - -	  In general, if the controller is based on an Adaptec SCSI controller -	  chip from the aic777x series or the aic78xx series, this driver -	  should work. The only exception is the 7810 which is specifically -	  not supported (that's the RAID controller chip on the AAA-13x -	  cards). - -	  Note that the AHA2920 SCSI host adapter is *not* supported by this -	  driver; choose "Future Domain 16xx SCSI support" instead if you have -	  one of those. - -	  Information on the configuration options for this controller can be -	  found by checking the help file for each of the available -	  configuration options. You should read -	  <file:Documentation/scsi/aic7xxx_old.txt> at a minimum before -	  contacting the maintainer with any questions.  The SCSI-HOWTO, -	  available from <http://www.tldp.org/docs.html#howto>, can also -	  be of great help. - -	  To compile this driver as a module, choose M here: the -	  module will be called aic7xxx_old. -  source "drivers/scsi/aic7xxx/Kconfig.aic79xx"  source "drivers/scsi/aic94xx/Kconfig"  source "drivers/scsi/mvsas/Kconfig" @@ -1811,6 +1773,7 @@ config SCSI_BFA_FC  config SCSI_VIRTIO  	tristate "virtio-scsi support"  	depends on VIRTIO +	select BLK_DEV_INTEGRITY  	help            This is the virtual HBA driver for virtio.  If the kernel will            be used in a virtual machine, say Y or M. diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 149bb6bf184..e172d4f8e02 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -70,7 +70,6 @@ obj-$(CONFIG_SCSI_AHA1740)	+= aha1740.o  obj-$(CONFIG_SCSI_AIC7XXX)	+= aic7xxx/  obj-$(CONFIG_SCSI_AIC79XX)	+= aic7xxx/  obj-$(CONFIG_SCSI_AACRAID)	+= aacraid/ -obj-$(CONFIG_SCSI_AIC7XXX_OLD)	+= aic7xxx_old.o  obj-$(CONFIG_SCSI_AIC94XX)	+= aic94xx/  obj-$(CONFIG_SCSI_PM8001)	+= pm8001/  obj-$(CONFIG_SCSI_ISCI)		+= isci/ diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 1e9d6ad9302..93d13fc9a29 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -27,8 +27,6 @@   */  /* - * $Log: NCR5380.c,v $ -   * Revision 1.10 1998/9/2	Alan Cox   *				(alan@lxorguk.ukuu.org.uk)   * Fixed up the timer lockups reported so far. Things still suck. 
Looking  @@ -89,13 +87,6 @@  #include <scsi/scsi_dbg.h>  #include <scsi/scsi_transport_spi.h> -#ifndef NDEBUG -#define NDEBUG 0 -#endif -#ifndef NDEBUG_ABORT -#define NDEBUG_ABORT 0 -#endif -  #if (NDEBUG & NDEBUG_LISTS)  #define LIST(x,y) {printk("LINE:%d   Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }  #define REMOVE(w,x,y,z) {printk("LINE:%d   Removing: %p->%p  %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); } @@ -584,7 +575,7 @@ static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,  	NCR5380_setup(instance);  	for (trying_irqs = i = 0, mask = 1; i < 16; ++i, mask <<= 1) -		if ((mask & possible) && (request_irq(i, &probe_intr, IRQF_DISABLED, "NCR-probe", NULL) == 0)) +		if ((mask & possible) && (request_irq(i, &probe_intr, 0, "NCR-probe", NULL) == 0))  			trying_irqs |= mask;  	timeout = jiffies + (250 * HZ / 1000); @@ -1005,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)  		LIST(cmd, tmp);  		tmp->host_scribble = (unsigned char *) cmd;  	} -	dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail")); +	dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");  	/* Run the coroutine if it isn't already running. */  	/* Kick off command processing */ @@ -1040,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work)  		/* Lock held here */  		done = 1;  		if (!hostdata->connected && !hostdata->selecting) { -			dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no)); +			dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no);  			/*  			 * Search through the issue_queue for a command destined  			 * for a target that's not busy. @@ -1048,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work)  			for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble)   			{  				if (prev != tmp) -					dprintk(NDEBUG_LISTS, ("MAIN tmp=%p   target=%d   busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun)); +					dprintk(NDEBUG_LISTS, "MAIN tmp=%p   target=%d   busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);  				/*  When we find one, remove it from the issue queue. */  				if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) {  					if (prev) { @@ -1066,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work)  					 * On failure, we must add the command back to the  					 *   issue queue so we can keep trying.   					 
*/ -					dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun)); +					dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);  					/*  					 * A successful selection is defined as one that  @@ -1095,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work)  						tmp->host_scribble = (unsigned char *) hostdata->issue_queue;  						hostdata->issue_queue = tmp;  						done = 0; -						dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no)); +						dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no);  					}  					/* lock held here still */  				}	/* if target/lun is not busy */ @@ -1125,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work)  #endif  		    && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies))  		    ) { -			dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no)); +			dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no);  			NCR5380_information_transfer(instance); -			dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no)); +			dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no);  			done = 0;  		} else  			break; @@ -1159,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)  	unsigned char basr;  	unsigned long flags; -	dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n", -		instance->irq)); +	dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n", +		instance->irq);  	do {  		done = 1; @@ -1173,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)  			NCR5380_dprint(NDEBUG_INTR, instance);  			if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {  				done = 0; -				dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no)); +				dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no);  				NCR5380_reselect(instance);  				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);  			} else if (basr & BASR_PARITY_ERROR) { -				dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no)); +				dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no);  				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);  			} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { -				dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no)); +				dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no);  				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);  			} else {  #if defined(REAL_DMA) @@ -1210,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)  					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);  				}  #else -				dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG))); +				dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));  				(void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);  #endif  			} @@ -1304,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)  	hostdata->restart_select = 0;  	NCR5380_dprint(NDEBUG_ARBITRATION, 
instance); -	dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id)); +	dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id);  	/*   	 * Set the phase bits to 0, otherwise the NCR5380 won't drive the  @@ -1333,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)  		goto failed;  	} -	dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no)); +	dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no);  	/*   	 * The arbitration delay is 2.2us, but this is a minimum and there is  @@ -1347,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)  	/* Check for lost arbitration */  	if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {  		NCR5380_write(MODE_REG, MR_BASE); -		dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no)); +		dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no);  		goto failed;  	}  	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL); @@ -1360,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)  	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {  		NCR5380_write(MODE_REG, MR_BASE);  		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -		dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no)); +		dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no);  		goto failed;  	}  	/*  @@ -1370,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)  	udelay(2); -	dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no)); +	dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no);  	/*   	 * Now that we have won arbitration, start Selection process, asserting  @@ -1422,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)  	udelay(1); -	dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd))); +	dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd));  	/*   	 * The SCSI specification calls for a 250 ms timeout for the actual  @@ -1487,7 +1478,7 @@ part2:  		collect_stats(hostdata, cmd);  		cmd->scsi_done(cmd);  		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); -		dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no)); +		dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);  		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);  		return 0;  	} @@ -1520,7 +1511,7 @@ part2:  		goto failed;  	} -	dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id)); +	dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);  	tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 
0 : 1), cmd->device->lun);  	len = 1; @@ -1530,7 +1521,7 @@ part2:  	data = tmp;  	phase = PHASE_MSGOUT;  	NCR5380_transfer_pio(instance, &phase, &len, &data); -	dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no)); +	dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no);  	/* XXX need to handle errors here */  	hostdata->connected = cmd;  	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); @@ -1583,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase  	NCR5380_setup(instance);  	if (!(p & SR_IO)) -		dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c)); +		dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c);  	else -		dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c)); +		dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c);  	/*   	 * The NCR5380 chip will only drive the SCSI bus when the  @@ -1620,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase  			break;  		} -		dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no)); +		dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no);  		/* Check for phase mismatch */  		if ((tmp & PHASE_MASK) != p) { -			dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no)); +			dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no);  			NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance);  			break;  		} @@ -1660,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase  		/* FIXME - if this fails bus reset ?? */  		NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ); -		dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no)); +		dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no);  /*   * We have several special cases to consider during REQ/ACK handshaking :  @@ -1681,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase  		}  	} while (--c); -	dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c)); +	dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c);  	*count = c;  	*data = d; @@ -1828,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  		c -= 2;  	}  #endif -	dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d)); +	dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);  	hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);  #endif @@ -1857,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  		NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);  #endif				/* def REAL_DMA */ -	dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG))); +	dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));  	/*   	 *	On the PAS16 at least I/O recovery delays are not needed here. 
@@ -1934,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  		}  	} -	dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG))); +	dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG));  	NCR5380_write(MODE_REG, MR_BASE);  	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); @@ -1948,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  #ifdef READ_OVERRUNS  	if (*phase == p && (p & SR_IO) && residue == 0) {  		if (overrun) { -			dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n")); +			dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");  			**data = saved_data;  			*data += 1;  			*count -= 1; @@ -1957,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  			printk("No overrun??\n");  			cnt = toPIO = 2;  		} -		dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data)); +		dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);  		NCR5380_transfer_pio(instance, phase, &cnt, data);  		*count -= toPIO - cnt;  	}  #endif -	dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count))); +	dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));  	return 0;  #elif defined(REAL_DMA) @@ -2013,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  		foo = NCR5380_pwrite(instance, d, c);  #else  		int timeout; -		dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c)); +		dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c);  		if (!(foo = NCR5380_pwrite(instance, d, c))) {  			/*  			 * Wait for the last byte to be sent.  
If REQ is being asserted for  @@ -2024,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  				while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH));  				if (!timeout) -					dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no)); +					dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no);  				if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {  					hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;  					if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {  						hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT; -						dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no)); +						dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no);  					}  				}  			} else { -				dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n")); +				dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n");  				while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT)); -				dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n")); +				dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n");  			}  		}  #endif @@ -2045,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase  	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);  	if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) { -		dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n")); +		dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n");  		if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) { -			dprintk(NDEBUG_C400_PWRITE, ("53C400w:    got it, reading reset interrupt reg\n")); +			dprintk(NDEBUG_C400_PWRITE, "53C400w:    got it, reading reset interrupt reg\n");  			NCR5380_read(RESET_PARITY_INTERRUPT_REG);  		} else {  			printk("53C400w:    IRQ NOT THERE!\n"); @@ -2139,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  					--cmd->SCp.buffers_residual;  					cmd->SCp.this_residual = cmd->SCp.buffer->length;  					cmd->SCp.ptr = sg_virt(cmd->SCp.buffer); -					dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual)); +					dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual);  				}  				/*  				 * The preferred transfer method is going to be  @@ -2219,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  				case LINKED_FLG_CMD_COMPLETE:  					/* Accept message by clearing ACK */  					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -					dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun)); +					dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);  					/*   					 * Sanity check : A linked command should only terminate with  					 * one of these messages if there are more linked commands @@ -2235,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  					/* The next command is still part of this process */  					cmd->next_link->tag = cmd->tag;  					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); -					dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, 
cmd->device->lun)); +					dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);  					collect_stats(hostdata, cmd);  					cmd->scsi_done(cmd);  					cmd = hostdata->connected; @@ -2247,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  					sink = 1;  					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);  					hostdata->connected = NULL; -					dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun)); +					dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun);  					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);  					/*  @@ -2281,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  					if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {  						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); -						dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no)); +						dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no);  						LIST(cmd, hostdata->issue_queue);  						cmd->host_scribble = (unsigned char *)  						    hostdata->issue_queue;  						hostdata->issue_queue = (Scsi_Cmnd *) cmd; -						dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no)); +						dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);  					} else  #endif				/* def AUTOSENSE */  					{ @@ -2327,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  						    hostdata->disconnected_queue;  						hostdata->connected = NULL;  						hostdata->disconnected_queue = cmd; -						dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" "  the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun)); +						dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" "  the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);  						/*   						 * Restore phase bits to 0 so an interrupted selection,   						 * arbitration can resume. 
@@ -2373,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  					extended_msg[0] = EXTENDED_MESSAGE;  					/* Accept first byte by clearing ACK */  					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -					dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no)); +					dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no);  					len = 2;  					data = extended_msg + 1;  					phase = PHASE_MSGIN;  					NCR5380_transfer_pio(instance, &phase, &len, &data); -					dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2])); +					dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]);  					if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {  						/* Accept third byte by clearing ACK */ @@ -2390,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  						phase = PHASE_MSGIN;  						NCR5380_transfer_pio(instance, &phase, &len, &data); -						dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len)); +						dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len);  						switch (extended_msg[2]) {  						case EXTENDED_SDTR: @@ -2456,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  				NCR5380_transfer_pio(instance, &phase, &len, &data);  				if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) {  					NCR5380_set_timer(hostdata, USLEEP_SLEEP); -					dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); +					dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires);  					return;  				}  				break; @@ -2468,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  				break;  			default:  				printk("scsi%d : unknown phase\n", instance->host_no); -				NCR5380_dprint(NDEBUG_ALL, instance); +				NCR5380_dprint(NDEBUG_ANY, instance);  			}	/* switch(phase) */  		}		/* if (tmp * SR_REQ) */  		else { @@ -2476,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {  			 */  			if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) {  				NCR5380_set_timer(hostdata, USLEEP_SLEEP); -				dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires)); +				dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires);  				return;  			}  		} @@ -2517,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {  	hostdata->restart_select = 1;  	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); -	dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no)); +	dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no);  	/*   	 * At this point, we have detected that our SCSI ID is on the bus, @@ -2597,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {  		do_abort(instance);  	} else {  		hostdata->connected = tmp; -		dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag)); +		dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = 
%d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);  	}  } @@ -2682,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {  	NCR5380_setup(instance); -	dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", instance->host_no)); -	dprintk(NDEBUG_ABORT, ("        basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG))); +	dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no); +	dprintk(NDEBUG_ABORT, "        basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));  #if 0  /* @@ -2693,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {   */  	if (hostdata->connected == cmd) { -		dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no)); +		dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no);  		hostdata->aborted = 1;  /*   * We should perform BSY checking, and make sure we haven't slipped @@ -2721,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {   *          from the issue queue.   */ -	dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no)); +	dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);  	for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)  		if (cmd == tmp) {  			REMOVE(5, *prev, tmp, tmp->host_scribble);  			(*prev) = (Scsi_Cmnd *) tmp->host_scribble;  			tmp->host_scribble = NULL;  			tmp->result = DID_ABORT << 16; -			dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no)); +			dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);  			tmp->scsi_done(tmp);  			return SUCCESS;  		} @@ -2750,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {   */  	if (hostdata->connected) { -		dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no)); +		dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no);  		return FAILED;  	}  /* @@ -2780,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {  	for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)  		if (cmd == tmp) { -			dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no)); +			dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);  			if (NCR5380_select(instance, cmd, (int) cmd->tag))  				return FAILED; -			dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no)); +			dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);  			do_abort(instance); diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index 14964d0a0e9..c79ddfa6f53 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -21,10 +21,6 @@   * 1+ (800) 334-5454   */ -/* - * $Log: NCR5380.h,v $ - */ -  #ifndef NCR5380_H  #define NCR5380_H @@ -60,6 +56,9 @@  #define NDEBUG_C400_PREAD	0x100000  #define NDEBUG_C400_PWRITE	0x200000  #define NDEBUG_LISTS		0x400000 +#define NDEBUG_ABORT		0x800000 +#define NDEBUG_TAGS		0x1000000 +#define NDEBUG_MERGING		0x2000000  #define NDEBUG_ANY		0xFFFFFFFFUL @@ -292,9 +291,24 @@ struct NCR5380_hostdata {  #ifdef __KERNEL__ -#define dprintk(a,b)			do {} while(0) -#define NCR5380_dprint(a,b)		do {} while(0) -#define NCR5380_dprint_phase(a,b)	do {} while(0) +#ifndef NDEBUG +#define NDEBUG 
(0) +#endif + +#define dprintk(flg, fmt, ...) \ +	do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0) + +#if NDEBUG +#define NCR5380_dprint(flg, arg) \ +	do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0) +#define NCR5380_dprint_phase(flg, arg) \ +	do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0) +static void NCR5380_print_phase(struct Scsi_Host *instance); +static void NCR5380_print(struct Scsi_Host *instance); +#else +#define NCR5380_dprint(flg, arg)       do {} while (0) +#define NCR5380_dprint_phase(flg, arg) do {} while (0) +#endif  #if defined(AUTOPROBE_IRQ)  static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible); @@ -307,10 +321,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);  #endif  static void NCR5380_main(struct work_struct *work);  static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance); -#ifdef NDEBUG -static void NCR5380_print_phase(struct Scsi_Host *instance); -static void NCR5380_print(struct Scsi_Host *instance); -#endif  static int NCR5380_abort(Scsi_Cmnd * cmd);  static int NCR5380_bus_reset(Scsi_Cmnd * cmd);  static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c index 30fa38a0ad3..9176bfbd574 100644 --- a/drivers/scsi/a2091.c +++ b/drivers/scsi/a2091.c @@ -201,7 +201,7 @@ static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)  	instance->irq = IRQ_AMIGA_PORTS;  	instance->unique_id = z->slotaddr; -	regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start); +	regs = ZTWO_VADDR(z->resource.start);  	regs->DAWR = DAWR_A2091;  	wdregs.SASR = &regs->SASR; diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c index c0f4f4290dd..dd5b64726dd 100644 --- a/drivers/scsi/a3000.c +++ b/drivers/scsi/a3000.c @@ -220,7 +220,7 @@ static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)  	instance->irq = IRQ_AMIGA_PORTS; -	regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start); +	regs = ZTWO_VADDR(res->start);  	regs->DAWR = DAWR_A3000;  	wdregs.SASR = &regs->SASR; diff --git a/drivers/scsi/a4000t.c b/drivers/scsi/a4000t.c index 70c521f79f7..f5a2ab41543 100644 --- a/drivers/scsi/a4000t.c +++ b/drivers/scsi/a4000t.c @@ -56,7 +56,7 @@ static int __init amiga_a4000t_scsi_probe(struct platform_device *pdev)  	scsi_addr = res->start + A4000T_SCSI_OFFSET;  	/* Fill in the required pieces of hostdata */ -	hostdata->base = (void __iomem *)ZTWO_VADDR(scsi_addr); +	hostdata->base = ZTWO_VADDR(scsi_addr);  	hostdata->clock = 50;  	hostdata->chip710 = 1;  	hostdata->dmode_extra = DMODE_FC2; diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h index 9323d058706..eaaf8705a5f 100644 --- a/drivers/scsi/aacraid/aacraid.h +++ b/drivers/scsi/aacraid/aacraid.h @@ -12,7 +12,7 @@   *----------------------------------------------------------------------------*/  #ifndef AAC_DRIVER_BUILD -# define AAC_DRIVER_BUILD 30200 +# define AAC_DRIVER_BUILD 30300  # define AAC_DRIVER_BRANCH "-ms"  #endif  #define MAXIMUM_NUM_CONTAINERS	32 diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c index d85ac1a9d2c..fbcd48d0bfc 100644 --- a/drivers/scsi/aacraid/commctrl.c +++ b/drivers/scsi/aacraid/commctrl.c @@ -511,7 +511,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)  		goto cleanup;  	} -	if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) { +	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) || +	    (fibsize 
> (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {  		rcode = -EINVAL;  		goto cleanup;  	} diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 408a42ef787..4921ed19a02 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -771,6 +771,8 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long  static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)  {  	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata; +	if (!capable(CAP_SYS_RAWIO)) +		return -EPERM;  	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);  } @@ -1079,6 +1081,7 @@ static struct scsi_host_template aac_driver_template = {  #endif  	.use_clustering			= ENABLE_CLUSTERING,  	.emulated			= 1, +	.no_write_same			= 1,  };  static void __aac_shutdown(struct aac_dev * aac) diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index dada38aeacc..5c6a8703f53 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c @@ -480,7 +480,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)  static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)  { -	u32 var; +	u32 var = 0;  	if (!(dev->supplement_adapter_info.SupportedOptions2 &  	  AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) { @@ -500,13 +500,14 @@ static int aac_rx_restart_adapter(struct aac_dev *dev, int bled)  		if (bled && (bled != -ETIMEDOUT))  			return -EINVAL;  	} -	if (bled || (var == 0x3803000F)) { /* USE_OTHER_METHOD */ +	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */  		rx_writel(dev, MUnit.reserved2, 3);  		msleep(5000); /* Delay 5 seconds */  		var = 0x00000001;  	} -	if (var != 0x00000001) +	if (bled && (var != 0x00000001))  		return -EINVAL; +	ssleep(5);  	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)  		return -ENODEV;  	if (startup_timeout < 300) @@ -646,7 +647,7 @@ int _aac_rx_init(struct aac_dev *dev)  	dev->sync_mode = 0;	/* sync. mode not supported */  	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);  	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, -			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { +			IRQF_SHARED, "aacraid", dev) < 0) {  		if (dev->msi)  			pci_disable_msi(dev->pdev);  		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 2244f315f33..e66477c9824 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c @@ -387,8 +387,7 @@ int aac_sa_init(struct aac_dev *dev)  		goto error_irq;  	dev->sync_mode = 0;	/* sync. 
mode not supported */  	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, -			IRQF_SHARED|IRQF_DISABLED, -			"aacraid", (void *)dev ) < 0) { +			IRQF_SHARED, "aacraid", (void *)dev) < 0) {  		printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",  			name, instance);  		goto error_iounmap; diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 7e17107643d..9c65aed2621 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c @@ -647,7 +647,7 @@ int aac_src_init(struct aac_dev *dev)  	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);  	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, -			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { +			IRQF_SHARED, "aacraid", dev) < 0) {  		if (dev->msi)  			pci_disable_msi(dev->pdev); @@ -804,7 +804,7 @@ int aac_srcv_init(struct aac_dev *dev)  		goto error_iounmap;  	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);  	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr, -		IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) { +		IRQF_SHARED, "aacraid", dev) < 0) {  		if (dev->msi)  			pci_disable_msi(dev->pdev);  		printk(KERN_ERR "%s%d: Interrupt unavailable.\n", diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index c67e401954c..d8145888e66 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -2511,8 +2511,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)  	struct asc_board *boardp = shost_priv(s);  	printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); -	printk(" host_busy %u, host_no %d, last_reset %d,\n", -	       s->host_busy, s->host_no, (unsigned)s->last_reset); +	printk(" host_busy %u, host_no %d,\n", +	       s->host_busy, s->host_no);  	printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",  	       (ulong)s->base, (ulong)s->io_port, boardp->irq); @@ -3345,8 +3345,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)  		shost->host_no);  	seq_printf(m, -		   " host_busy %u, last_reset %lu, max_id %u, max_lun %u, max_channel %u\n", -		   shost->host_busy, shost->last_reset, shost->max_id, +		   " host_busy %u, max_id %u, max_lun %u, max_channel %u\n", +		   shost->host_busy, shost->max_id,  		   shost->max_lun, shost->max_channel);  	seq_printf(m, diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 3f7b6fee0a7..e86eb6a921f 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -857,7 +857,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)  	SETPORT(SIMODE0, 0);  	SETPORT(SIMODE1, 0); -	if( request_irq(shpnt->irq, swintr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) { +	if (request_irq(shpnt->irq, swintr, IRQF_SHARED, "aha152x", shpnt)) {  		printk(KERN_ERR "aha152x%d: irq %d busy.\n", shpnt->host_no, shpnt->irq);  		goto out_host_put;  	} @@ -891,7 +891,7 @@ struct Scsi_Host *aha152x_probe_one(struct aha152x_setup *setup)  	SETPORT(SSTAT0, 0x7f);  	SETPORT(SSTAT1, 0xef); -	if ( request_irq(shpnt->irq, intr, IRQF_DISABLED|IRQF_SHARED, "aha152x", shpnt) ) { +	if (request_irq(shpnt->irq, intr, IRQF_SHARED, "aha152x", shpnt)) {  		printk(KERN_ERR "aha152x%d: failed to reassign irq %d.\n", shpnt->host_no, shpnt->irq);  		goto out_host_put;  	} diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index 9b059422aac..113874c1284 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -911,7 +911,7 @@ struct vpd_config {  	uint8_t  length;  	uint8_t  revision;  	uint8_t  device_flags; -	uint8_t  termnation_menus[2]; +	uint8_t  
termination_menus[2];  	uint8_t  fifo_threshold;  	uint8_t  end_tag;  	uint8_t  vpd_checksum; diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c index 14b5f8d0e7f..cc9bd26f5d1 100644 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c @@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)  		for (bit = 0; bit < 8; bit++) {  			if ((pci_status[i] & (0x1 << bit)) != 0) { -				static const char *s; +				const char *s;  				s = pci_status_strings[bit];  				if (i == 7/*TARG*/ && bit == 3) @@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)  		for (bit = 0; bit < 8; bit++) { -			if ((split_status[i] & (0x1 << bit)) != 0) { -				static const char *s; - -				s = split_status_strings[bit]; -				printk(s, ahd_name(ahd), +			if ((split_status[i] & (0x1 << bit)) != 0) +				printk(split_status_strings[bit], ahd_name(ahd),  				       split_status_source[i]); -			}  			if (i > 1)  				continue; -			if ((sg_split_status[i] & (0x1 << bit)) != 0) { -				static const char *s; - -				s = split_status_strings[bit]; -				printk(s, ahd_name(ahd), "SG"); -			} +			if ((sg_split_status[i] & (0x1 << bit)) != 0) +				printk(split_status_strings[bit], ahd_name(ahd), "SG");  		}  	}  	/* diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index c0c62583b54..114ff0c6e31 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -145,16 +145,6 @@ static struct scsi_transport_template *ahc_linux_transport_template = NULL;  #endif  /* - * Control collection of SCSI transfer statistics for the /proc filesystem. - * - * NOTE: Do NOT enable this when running on kernels version 1.2.x and below. - * NOTE: This does affect performance since it has to maintain statistics. - */ -#ifdef CONFIG_AIC7XXX_PROC_STATS -#define AIC7XXX_PROC_STATS -#endif - -/*   * To change the default number of tagged transactions allowed per-device,   * add a line to the lilo.conf file like:   * append="aic7xxx=verbose,tag_info:{{32,32,32,32},{32,32,32,32}}" diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h index 9df9e2ce353..8373447bd7d 100644 --- a/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h +++ b/drivers/scsi/aic7xxx/aicasm/aicasm_insformat.h @@ -209,7 +209,6 @@ struct instruction {  #define AIC_OP_JC16	0x9105  #define AIC_OP_JNC16	0x9205  #define AIC_OP_CALL16	0x9305 -#define AIC_OP_CALL16	0x9305  /* Page extension is low three bits of second opcode byte. */  #define AIC_OP_JMPF	0xA005 diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c deleted file mode 100644 index 33ec9c64340..00000000000 --- a/drivers/scsi/aic7xxx_old.c +++ /dev/null @@ -1,11149 +0,0 @@ -/*+M************************************************************************* - * Adaptec AIC7xxx device driver for Linux. - * - * Copyright (c) 1994 John Aycock - *   The University of Calgary Department of Computer Science. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING.  If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Sources include the Adaptec 1740 driver (aha1740.c), the Ultrastor 24F - * driver (ultrastor.c), various Linux kernel source, the Adaptec EISA - * config file (!adp7771.cfg), the Adaptec AHA-2740A Series User's Guide, - * the Linux Kernel Hacker's Guide, Writing a SCSI Device Driver for Linux, - * the Adaptec 1542 driver (aha1542.c), the Adaptec EISA overlay file - * (adp7770.ovl), the Adaptec AHA-2740 Series Technical Reference Manual, - * the Adaptec AIC-7770 Data Book, the ANSI SCSI specification, the - * ANSI SCSI-2 specification (draft 10c), ... - * - * -------------------------------------------------------------------------- - * - *  Modifications by Daniel M. Eischen (deischen@iworks.InterWorks.org): - * - *  Substantially modified to include support for wide and twin bus - *  adapters, DMAing of SCBs, tagged queueing, IRQ sharing, bug fixes, - *  SCB paging, and other rework of the code. - * - *  Parts of this driver were also based on the FreeBSD driver by - *  Justin T. Gibbs.  His copyright follows: - * - * --------------------------------------------------------------------------   - * Copyright (c) 1994-1997 Justin Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - *    notice, this list of conditions, and the following disclaimer, - *    without modification, immediately at the beginning of the file. - * 2. Redistributions in binary form must reproduce the above copyright - *    notice, this list of conditions and the following disclaimer in the - *    documentation and/or other materials provided with the distribution. - * 3. The name of the author may not be used to endorse or promote products - *    derived from this software without specific prior written permission. - * - * Where this Software is combined with software released under the terms of  - * the GNU General Public License ("GPL") and the terms of the GPL would require the  - * combined work to also be released under the terms of the GPL, the terms - * and conditions of this License will apply in addition to those of the - * GPL with the exception of any terms or conditions of this License that - * conflict with, or are expressly prohibited by, the GPL. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - *      $Id: aic7xxx.c,v 1.119 1997/06/27 19:39:18 gibbs Exp $ - *--------------------------------------------------------------------------- - * - *  Thanks also go to (in alphabetical order) the following: - * - *    Rory Bolt     - Sequencer bug fixes - *    Jay Estabrook - Initial DEC Alpha support - *    Doug Ledford  - Much needed abort/reset bug fixes - *    Kai Makisara  - DMAing of SCBs - * - *  A Boot time option was also added for not resetting the scsi bus. - * - *    Form:  aic7xxx=extended - *           aic7xxx=no_reset - *           aic7xxx=ultra - *           aic7xxx=irq_trigger:[0,1]  # 0 edge, 1 level - *           aic7xxx=verbose - * - *  Daniel M. Eischen, deischen@iworks.InterWorks.org, 1/23/97 - * - *  $Id: aic7xxx.c,v 4.1 1997/06/12 08:23:42 deang Exp $ - *-M*************************************************************************/ - -/*+M************************************************************************** - * - * Further driver modifications made by Doug Ledford <dledford@redhat.com> - * - * Copyright (c) 1997-1999 Doug Ledford - * - * These changes are released under the same licensing terms as the FreeBSD - * driver written by Justin Gibbs.  Please see his Copyright notice above - * for the exact terms and conditions covering my changes as well as the - * warranty statement. - * - * Modifications made to the aic7xxx.c,v 4.1 driver from Dan Eischen include - * but are not limited to: - * - *  1: Import of the latest FreeBSD sequencer code for this driver - *  2: Modification of kernel code to accommodate different sequencer semantics - *  3: Extensive changes throughout kernel portion of driver to improve - *     abort/reset processing and error hanndling - *  4: Other work contributed by various people on the Internet - *  5: Changes to printk information and verbosity selection code - *  6: General reliability related changes, especially in IRQ management - *  7: Modifications to the default probe/attach order for supported cards - *  8: SMP friendliness has been improved - * - * Overall, this driver represents a significant departure from the official - * aic7xxx driver released by Dan Eischen in two ways.  First, in the code - * itself.  A diff between the two version of the driver is now a several - * thousand line diff.  Second, in approach to solving the same problem.  The - * problem is importing the FreeBSD aic7xxx driver code to linux can be a - * difficult and time consuming process, that also can be error prone.  Dan - * Eischen's official driver uses the approach that the linux and FreeBSD - * drivers should be as identical as possible.  To that end, his next version - * of this driver will be using a mid-layer code library that he is developing - * to moderate communications between the linux mid-level SCSI code and the - * low level FreeBSD driver.  He intends to be able to essentially drop the - * FreeBSD driver into the linux kernel with only a few minor tweaks to some - * include files and the like and get things working, making for fast easy - * imports of the FreeBSD code into linux. - * - * I disagree with Dan's approach.  Not that I don't think his way of doing - * things would be nice, easy to maintain, and create a more uniform driver - * between FreeBSD and Linux.  I have no objection to those issues.  My - * disagreement is on the needed functionality.  There simply are certain - * things that are done differently in FreeBSD than linux that will cause - * problems for this driver regardless of any middle ware Dan implements. 
- * The biggest example of this at the moment is interrupt semantics.  Linux
- * doesn't provide the same protection techniques as FreeBSD does, nor can
- * they be easily implemented in any middleware code since they would truly
- * belong in the kernel proper and would affect all drivers.  For the time
- * being, I see issues such as these as major stumbling blocks to the
- * reliability of code based upon such middleware.  Therefore, I choose to
- * use a different approach to importing the FreeBSD code that doesn't
- * involve any middleware type code.  My approach is to import the sequencer
- * code from FreeBSD wholesale.  Then, to only make changes in the kernel
- * portion of the driver as they are needed for the new sequencer semantics.
- * In this way, the portion of the driver that speaks to the rest of the
- * linux kernel is fairly static and can be changed/modified to solve
- * any problems one might encounter without concern for the FreeBSD driver.
- *
- * Note: If time and experience should prove me wrong that the middleware
- * code Dan writes is reliable in its operation, then I'll retract my above
- * statements.  But, for those that don't know, I'm from Missouri (in the US)
- * and our state motto is "The Show-Me State".  Well, before I will put
- * faith into it, you'll have to show me that it works :)
- *
- *_M*************************************************************************/
-
-/*
- * The next three defines are user configurable.  These should be the only
- * defines a user might need to get in here and change.  There are other
- * defines buried deeper in the code, but those really shouldn't need to be
- * touched under normal conditions.
- */
-
-/*
- * AIC7XXX_STRICT_PCI_SETUP
- *   Should we assume the PCI config options on our controllers are set with
- *   sane and proper values, or should we be anal about our PCI config
- *   registers and force them to what we want?  The main advantage to
- *   defining this option is on non-Intel hardware where the BIOS may not
- *   have been run to set things up, or if you have one of the BIOSless
- *   Adaptec controllers, such as a 2910, that don't get set up by the
- *   BIOS.  However, keep in mind that we really do set the most important
- *   items in the driver regardless of this setting; this only controls some
- *   of the more esoteric PCI options on these cards.  In that sense, I
- *   would default to leaving this off.  However, if people wish to try
- *   things both ways, that would also help me to know if there are some
- *   machines where it works one way but not another.
- *
- *   -- July 7, 17:09
- *     OK...I need this on my machine for testing, so the default is to
- *     leave it defined.
- *
- *   -- July 7, 18:49
- *     I needed it for testing, but it didn't make any difference, so back
- *     off she goes.
- *
- *   -- July 16, 23:04
- *     I turned it back on to try and compensate for the 2.1.x PCI code
- *     which no longer relies solely on the BIOS and now tries to set
- *     things itself.
- */
-
-#define AIC7XXX_STRICT_PCI_SETUP
-
-/*
- * AIC7XXX_VERBOSE_DEBUGGING
- *   This option enables a lot of extra printk();s in the code, surrounded
- *   by if (aic7xxx_verbose ...) statements.  Executing all of those if
- *   statements and the extra checks can get to the point where it actually
- *   does have an impact on CPU usage and such, as well as code size.
- *   Disabling this define will keep some of those from becoming part of the
- *   code.
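The gating the comment above describes is the usual compile-time pattern: a wrapper macro that expands to nothing when AIC7XXX_VERBOSE_DEBUGGING is undefined, so neither the if test nor the printk() reaches the object code. A minimal sketch only; the AIC_VDEBUG name is hypothetical, while aic7xxx_verbose and the VERBOSE_* flags are the driver's own, defined further down in this file.

#ifdef AIC7XXX_VERBOSE_DEBUGGING
#define AIC_VDEBUG(level, fmt, ...)                     \
  do {                                                  \
    if (aic7xxx_verbose & (level))                      \
      printk(KERN_INFO fmt, ##__VA_ARGS__);             \
  } while (0)
#else
#define AIC_VDEBUG(level, fmt, ...) do { } while (0)  /* compiled out */
#endif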
- *
- *   NOTE:  Currently, this option has no real effect; I will be adding the
- *   various #ifdef's in the code later when I've decided a section is
- *   complete and no longer needs debugging.  OK...a lot of things are now
- *   surrounded by this define, so turning this off does have an impact.
- */
-
-/*
- * #define AIC7XXX_VERBOSE_DEBUGGING
- */
-
-#include <linux/module.h>
-#include <stdarg.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/blkdev.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "aic7xxx_old/aic7xxx.h"
-
-#include "aic7xxx_old/sequencer.h"
-#include "aic7xxx_old/scsi_message.h"
-#include "aic7xxx_old/aic7xxx_reg.h"
-#include <scsi/scsicam.h>
-
-#include <linux/stat.h>
-#include <linux/slab.h>        /* for kmalloc() */
-
-#define AIC7XXX_C_VERSION  "5.2.6"
-
-#define ALL_TARGETS -1
-#define ALL_CHANNELS -1
-#define ALL_LUNS -1
-#define MAX_TARGETS  16
-#define MAX_LUNS     8
-#ifndef TRUE
-#  define TRUE 1
-#endif
-#ifndef FALSE
-#  define FALSE 0
-#endif
-
-#if defined(__powerpc__) || defined(__i386__) || defined(__x86_64__)
-#  define MMAPIO
-#endif
-
-/*
- * You can try raising me for better performance or lowering me if you have
- * flaky devices that go off the scsi bus when hit with too many tagged
- * commands (like some IBM SCSI-3 LVD drives).
- */
-#define AIC7XXX_CMDS_PER_DEVICE 32
-
-typedef struct
-{
-  unsigned char tag_commands[16];   /* Allow for wide/twin adapters. */
-} adapter_tag_info_t;
-
-/*
- * Make a define that will tell the driver to use the default tag depth
- * everywhere.
- */
-#define DEFAULT_TAG_COMMANDS {0, 0, 0, 0, 0, 0, 0, 0,\
-                              0, 0, 0, 0, 0, 0, 0, 0}
-
-/*
- * Modify this as you see fit for your system.  By setting tag_commands
- * to 0, the driver will use its own algorithm for determining the
- * number of commands to use (see above).  When 255, the driver will
- * not enable tagged queueing for that particular device.  When positive
- * (> 0) and (< 255) the values in the array are used for the queue_depth.
- * Note that the maximum value for an entry is 254, but you're insane if
- * you try to use that many commands on one device.
- *
- * In this example, the first line tells the driver to use the default
- * tag depth for all the devices on the first probed aic7xxx adapter.
- *
- * The second line enables tagged queueing with 4 commands/LUN for IDs
- * (0, 2-11, 13-15), disables tagged queueing for ID 12, and tells the
- * driver to use its own algorithm for ID 1.
- *
- * The third line is the same as the first line.
- *
- * The fourth line disables tagged queueing for devices 0 and 3.  It
- * enables tagged queueing for the other IDs, with 16 commands/LUN
- * for IDs 1 and 4, 127 commands/LUN for ID 8, and 4 commands/LUN for
- * IDs 2, 5-7, and 9-15.
- */
-
-/*
- * NOTE: The below structure is for reference only, the actual structure
- *       to modify in order to change things is found after this fake one.
- * -adapter_tag_info_t aic7xxx_tag_info[] = -{ -  {DEFAULT_TAG_COMMANDS}, -  {{4, 0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 255, 4, 4, 4}}, -  {DEFAULT_TAG_COMMANDS}, -  {{255, 16, 4, 255, 16, 4, 4, 4, 127, 4, 4, 4, 4, 4, 4, 4}} -}; -*/ - -static adapter_tag_info_t aic7xxx_tag_info[] = -{ -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS}, -  {DEFAULT_TAG_COMMANDS} -}; - - -/* - * Define an array of board names that can be indexed by aha_type. - * Don't forget to change this when changing the types! - */ -static const char *board_names[] = { -  "AIC-7xxx Unknown",                                   /* AIC_NONE */ -  "Adaptec AIC-7810 Hardware RAID Controller",          /* AIC_7810 */ -  "Adaptec AIC-7770 SCSI host adapter",                 /* AIC_7770 */ -  "Adaptec AHA-274X SCSI host adapter",                 /* AIC_7771 */ -  "Adaptec AHA-284X SCSI host adapter",                 /* AIC_284x */ -  "Adaptec AIC-7850 SCSI host adapter",                 /* AIC_7850 */ -  "Adaptec AIC-7855 SCSI host adapter",                 /* AIC_7855 */ -  "Adaptec AIC-7860 Ultra SCSI host adapter",           /* AIC_7860 */ -  "Adaptec AHA-2940A Ultra SCSI host adapter",          /* AIC_7861 */ -  "Adaptec AIC-7870 SCSI host adapter",                 /* AIC_7870 */ -  "Adaptec AHA-294X SCSI host adapter",                 /* AIC_7871 */ -  "Adaptec AHA-394X SCSI host adapter",                 /* AIC_7872 */ -  "Adaptec AHA-398X SCSI host adapter",                 /* AIC_7873 */ -  "Adaptec AHA-2944 SCSI host adapter",                 /* AIC_7874 */ -  "Adaptec AIC-7880 Ultra SCSI host adapter",           /* AIC_7880 */ -  "Adaptec AHA-294X Ultra SCSI host adapter",           /* AIC_7881 */ -  "Adaptec AHA-394X Ultra SCSI host adapter",           /* AIC_7882 */ -  "Adaptec AHA-398X Ultra SCSI host adapter",           /* AIC_7883 */ -  "Adaptec AHA-2944 Ultra SCSI host adapter",           /* AIC_7884 */ -  "Adaptec AHA-2940UW Pro Ultra SCSI host adapter",     /* AIC_7887 */ -  "Adaptec AIC-7895 Ultra SCSI host adapter",           /* AIC_7895 */ -  "Adaptec AIC-7890/1 Ultra2 SCSI host adapter",        /* AIC_7890 */ -  "Adaptec AHA-293X Ultra2 SCSI host adapter",          /* AIC_7890 */ -  "Adaptec AHA-294X Ultra2 SCSI host adapter",          /* AIC_7890 */ -  "Adaptec AIC-7896/7 Ultra2 SCSI host adapter",        /* AIC_7896 */ -  "Adaptec AHA-394X Ultra2 SCSI host adapter",          /* AIC_7897 */ -  "Adaptec AHA-395X Ultra2 SCSI host adapter",          /* AIC_7897 */ -  "Adaptec PCMCIA SCSI controller",                     /* card bus stuff */ -  "Adaptec AIC-7892 Ultra 160/m SCSI host adapter",     /* AIC_7892 */ -  "Adaptec AIC-7899 Ultra 160/m SCSI host adapter",     /* AIC_7899 */ -}; - -/* - * There should be a specific return value for this in scsi.h, but - * it seems that most drivers ignore it. - */ -#define DID_UNDERFLOW   DID_ERROR - -/* - *  What we want to do is have the higher level scsi driver requeue - *  the command to us. There is no specific driver status for this - *  condition, but the higher level scsi driver will requeue the - *  command on a DID_BUS_BUSY error. 
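Returning briefly to the tag_commands convention documented above (0 means let the driver pick, 255 means no tagged queueing, anything else is the literal depth), here is a minimal sketch of how one entry maps to an effective queue depth. tag_info_to_depth is a hypothetical helper for illustration, not a function in this driver; AIC7XXX_CMDS_PER_DEVICE is the driver's own default from above.

/* Hypothetical helper: map one tag_commands[] entry to an effective
 * queue depth under the convention documented above. */
static unsigned char tag_info_to_depth(unsigned char tag_commands)
{
  if (tag_commands == 0)
    return AIC7XXX_CMDS_PER_DEVICE;  /* 0: driver default algorithm */
  if (tag_commands == 255)
    return 1;                        /* 255: tagged queueing disabled */
  return tag_commands;               /* 1-254: literal queue depth */
}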
- *
- *  Upon further inspection and testing, it seems that DID_BUS_BUSY
- *  will *always* retry the command.  We can get into an infinite loop
- *  if this happens, when what we really want is some sort of counter
- *  that will automatically abort/reset the command after so many retries.
- *  Using DID_ERROR will do just that.  (Made per a suggestion by
- *  Doug Ledford, 8/1/96)
- */
-#define DID_RETRY_COMMAND DID_ERROR
-
-#define HSCSIID        0x07
-#define SCSI_RESET     0x040
-
-/*
- * EISA/VL-bus stuff
- */
-#define MINSLOT                1
-#define MAXSLOT                15
-#define SLOTBASE(x)        ((x) << 12)
-#define BASE_TO_SLOT(x) ((x) >> 12)
-
-/*
- * Standard EISA Host ID regs  (Offset from slot base)
- */
-#define AHC_HID0              0x80   /* 0,1: msb of ID2, 2-7: ID1      */
-#define AHC_HID1              0x81   /* 0-4: ID3, 5-7: LSB ID2         */
-#define AHC_HID2              0x82   /* product                        */
-#define AHC_HID3              0x83   /* firmware revision              */
-
-/*
- * AIC-7770 I/O range to reserve for a card
- */
-#define MINREG                0xC00
-#define MAXREG                0xCFF
-
-#define INTDEF                0x5C      /* Interrupt Definition Register */
-
-/*
- * AIC-78X0 PCI registers
- */
-#define        CLASS_PROGIF_REVID        0x08
-#define                DEVREVID        0x000000FFul
-#define                PROGINFC        0x0000FF00ul
-#define                SUBCLASS        0x00FF0000ul
-#define                BASECLASS        0xFF000000ul
-
-#define        CSIZE_LATTIME                0x0C
-#define                CACHESIZE        0x0000003Ful        /* only 5 bits */
-#define                LATTIME                0x0000FF00ul
-
-#define        DEVCONFIG                0x40
-#define                SCBSIZE32        0x00010000ul        /* aic789X only */
-#define                MPORTMODE        0x00000400ul        /* aic7870 only */
-#define                RAMPSM           0x00000200ul        /* aic7870 only */
-#define                RAMPSM_ULTRA2    0x00000004
-#define                VOLSENSE         0x00000100ul
-#define                SCBRAMSEL        0x00000080ul
-#define                SCBRAMSEL_ULTRA2 0x00000008
-#define                MRDCEN           0x00000040ul
-#define                EXTSCBTIME       0x00000020ul        /* aic7870 only */
-#define                EXTSCBPEN        0x00000010ul        /* aic7870 only */
-#define                BERREN           0x00000008ul
-#define                DACEN            0x00000004ul
-#define                STPWLEVEL        0x00000002ul
-#define                DIFACTNEGEN      0x00000001ul        /* aic7870 only */
-
-#define        SCAMCTL                  0x1a                /* Ultra2 only  */
-#define        CCSCBBADDR               0xf0                /* aic7895/6/7  */
-
-/*
- * Define the different types of SEEPROMs on aic7xxx adapters
- * and make it also represent the address size used in accessing
- * its registers.  The 93C46 chips have 1024 bits organized into
- * 64 16-bit words, while the 93C56 chips have 2048 bits organized
- * into 128 16-bit words.  The C46 chips use 6 bits to address
- * each word, while the C56 and C66 (4096 bits) use 8 bits to
- * address each word.
- */
-typedef enum {C46 = 6, C56_66 = 8} seeprom_chip_type;
-
-/*
- *
- * Define the format of the SEEPROM registers (16 bits).
- * - */ -struct seeprom_config { - -/* - * SCSI ID Configuration Flags - */ -#define CFXFER                0x0007      /* synchronous transfer rate */ -#define CFSYNCH               0x0008      /* enable synchronous transfer */ -#define CFDISC                0x0010      /* enable disconnection */ -#define CFWIDEB               0x0020      /* wide bus device (wide card) */ -#define CFSYNCHISULTRA        0x0040      /* CFSYNC is an ultra offset */ -#define CFNEWULTRAFORMAT      0x0080      /* Use the Ultra2 SEEPROM format */ -#define CFSTART               0x0100      /* send start unit SCSI command */ -#define CFINCBIOS             0x0200      /* include in BIOS scan */ -#define CFRNFOUND             0x0400      /* report even if not found */ -#define CFMULTILUN            0x0800      /* probe mult luns in BIOS scan */ -#define CFWBCACHEYES          0x4000      /* Enable W-Behind Cache on drive */ -#define CFWBCACHENC           0xc000      /* Don't change W-Behind Cache */ -/* UNUSED                0x3000 */ -  unsigned short device_flags[16];        /* words 0-15 */ - -/* - * BIOS Control Bits - */ -#define CFSUPREM        0x0001  /* support all removable drives */ -#define CFSUPREMB       0x0002  /* support removable drives for boot only */ -#define CFBIOSEN        0x0004  /* BIOS enabled */ -/* UNUSED                0x0008 */ -#define CFSM2DRV        0x0010  /* support more than two drives */ -#define CF284XEXTEND    0x0020  /* extended translation (284x cards) */ -/* UNUSED                0x0040 */ -#define CFEXTEND        0x0080  /* extended translation enabled */ -/* UNUSED                0xFF00 */ -  unsigned short bios_control;  /* word 16 */ - -/* - * Host Adapter Control Bits - */ -#define CFAUTOTERM      0x0001  /* Perform Auto termination */ -#define CFULTRAEN       0x0002  /* Ultra SCSI speed enable (Ultra cards) */ -#define CF284XSELTO     0x0003  /* Selection timeout (284x cards) */ -#define CF284XFIFO      0x000C  /* FIFO Threshold (284x cards) */ -#define CFSTERM         0x0004  /* SCSI low byte termination */ -#define CFWSTERM        0x0008  /* SCSI high byte termination (wide card) */ -#define CFSPARITY       0x0010  /* SCSI parity */ -#define CF284XSTERM     0x0020  /* SCSI low byte termination (284x cards) */ -#define CFRESETB        0x0040  /* reset SCSI bus at boot */ -#define CFBPRIMARY      0x0100  /* Channel B primary on 7895 chipsets */ -#define CFSEAUTOTERM    0x0400  /* aic7890 Perform SE Auto Term */ -#define CFLVDSTERM      0x0800  /* aic7890 LVD Termination */ -/* UNUSED                0xF280 */ -  unsigned short adapter_control;        /* word 17 */ - -/* - * Bus Release, Host Adapter ID - */ -#define CFSCSIID        0x000F                /* host adapter SCSI ID */ -/* UNUSED                0x00F0 */ -#define CFBRTIME        0xFF00                /* bus release time */ -  unsigned short brtime_id;                /* word 18 */ - -/* - * Maximum targets - */ -#define CFMAXTARG        0x00FF        /* maximum targets */ -/* UNUSED                0xFF00 */ -  unsigned short max_targets;                /* word 19 */ - -  unsigned short res_1[11];                /* words 20-30 */ -  unsigned short checksum;                /* word 31 */ -}; - -#define SELBUS_MASK                0x0a -#define         SELNARROW        0x00 -#define         SELBUSB                0x08 -#define SINGLE_BUS                0x00 - -#define SCB_TARGET(scb)         \ -       (((scb)->hscb->target_channel_lun & TID) >> 4) -#define SCB_LUN(scb)            \ -       
((scb)->hscb->target_channel_lun & LID)
-#define SCB_IS_SCSIBUS_B(scb)   \
-       (((scb)->hscb->target_channel_lun & SELBUSB) != 0)
-
-/*
- * If an error occurs during a data transfer phase, run the command
- * to completion - it's easier that way - making a note of the error
- * condition in this location. This then will modify a DID_OK status
- * into an appropriate error for the higher-level SCSI code.
- */
-#define aic7xxx_error(cmd)        ((cmd)->SCp.Status)
-
-/*
- * Keep track of the target's returned status.
- */
-#define aic7xxx_status(cmd)        ((cmd)->SCp.sent_command)
-
-/*
- * The position of the SCSI command's scb within the scb array.
- */
-#define aic7xxx_position(cmd)        ((cmd)->SCp.have_data_in)
-
-/*
- * The stored DMA mapping for single-buffer data transfers.
- */
-#define aic7xxx_mapping(cmd)	     ((cmd)->SCp.phase)
-
-/*
- * Get our private data area from a scsi cmd pointer
- */
-#define AIC_DEV(cmd)	((struct aic_dev_data *)(cmd)->device->hostdata)
-
-/*
- * So we can keep track of our host structs
- */
-static struct aic7xxx_host *first_aic7xxx = NULL;
-
-/*
- * As of Linux 2.1, the mid-level SCSI code uses virtual addresses
- * in the scatter-gather lists.  We need to convert the virtual
- * addresses to physical addresses.
- */
-struct hw_scatterlist {
-  unsigned int address;
-  unsigned int length;
-};
-
-/*
- * Maximum number of SG segments these cards can support.
- */
-#define        AIC7XXX_MAX_SG 128
-
-/*
- * The maximum number of SCBs we could have for ANY type
- * of card. DON'T FORGET TO CHANGE THE SCB MASK IN THE
- * SEQUENCER CODE IF THIS IS MODIFIED!
- */
-#define AIC7XXX_MAXSCB        255
-
-
-struct aic7xxx_hwscb {
-/* ------------    Begin hardware supported fields    ---------------- */
-/* 0*/  unsigned char control;
-/* 1*/  unsigned char target_channel_lun;       /* 4/1/3 bits */
-/* 2*/  unsigned char target_status;
-/* 3*/  unsigned char SG_segment_count;
-/* 4*/  unsigned int  SG_list_pointer;
-/* 8*/  unsigned char residual_SG_segment_count;
-/* 9*/  unsigned char residual_data_count[3];
-/*12*/  unsigned int  data_pointer;
-/*16*/  unsigned int  data_count;
-/*20*/  unsigned int  SCSI_cmd_pointer;
-/*24*/  unsigned char SCSI_cmd_length;
-/*25*/  unsigned char tag;          /* Index into our kernel SCB array.
-                                     * Also used as the tag for tagged I/O
-                                     */
-#define SCB_PIO_TRANSFER_SIZE  26   /* amount we need to upload/download
-                                     * via PIO to initialize a transaction.
-                                     */
-/*26*/  unsigned char next;         /* Used to thread SCBs awaiting selection
-                                     * or disconnected down in the sequencer.
-                                     */ -/*27*/  unsigned char prev; -/*28*/  unsigned int pad;           /* -                                     * Unused by the kernel, but we require -                                     * the padding so that the array of -                                     * hardware SCBs is aligned on 32 byte -                                     * boundaries so the sequencer can index -                                     */ -}; - -typedef enum { -        SCB_FREE                = 0x0000, -        SCB_DTR_SCB             = 0x0001, -        SCB_WAITINGQ            = 0x0002, -        SCB_ACTIVE              = 0x0004, -        SCB_SENSE               = 0x0008, -        SCB_ABORT               = 0x0010, -        SCB_DEVICE_RESET        = 0x0020, -        SCB_RESET               = 0x0040, -        SCB_RECOVERY_SCB        = 0x0080, -        SCB_MSGOUT_PPR          = 0x0100, -        SCB_MSGOUT_SENT         = 0x0200, -        SCB_MSGOUT_SDTR         = 0x0400, -        SCB_MSGOUT_WDTR         = 0x0800, -        SCB_MSGOUT_BITS         = SCB_MSGOUT_PPR | -                                  SCB_MSGOUT_SENT |  -                                  SCB_MSGOUT_SDTR | -                                  SCB_MSGOUT_WDTR, -        SCB_QUEUED_ABORT        = 0x1000, -        SCB_QUEUED_FOR_DONE     = 0x2000, -        SCB_WAS_BUSY            = 0x4000, -	SCB_QUEUE_FULL		= 0x8000 -} scb_flag_type; - -typedef enum { -        AHC_FNONE                 = 0x00000000, -        AHC_PAGESCBS              = 0x00000001, -        AHC_CHANNEL_B_PRIMARY     = 0x00000002, -        AHC_USEDEFAULTS           = 0x00000004, -        AHC_INDIRECT_PAGING       = 0x00000008, -        AHC_CHNLB                 = 0x00000020, -        AHC_CHNLC                 = 0x00000040, -        AHC_EXTEND_TRANS_A        = 0x00000100, -        AHC_EXTEND_TRANS_B        = 0x00000200, -        AHC_TERM_ENB_A            = 0x00000400, -        AHC_TERM_ENB_SE_LOW       = 0x00000400, -        AHC_TERM_ENB_B            = 0x00000800, -        AHC_TERM_ENB_SE_HIGH      = 0x00000800, -        AHC_HANDLING_REQINITS     = 0x00001000, -        AHC_TARGETMODE            = 0x00002000, -        AHC_NEWEEPROM_FMT         = 0x00004000, - /* -  *  Here ends the FreeBSD defined flags and here begins the linux defined -  *  flags.  NOTE: I did not preserve the old flag name during this change -  *  specifically to force me to evaluate what flags were being used properly -  *  and what flags weren't.  This way, I could clean up the flag usage on -  *  a use by use basis.  
Doug Ledford -  */ -        AHC_MOTHERBOARD           = 0x00020000, -        AHC_NO_STPWEN             = 0x00040000, -        AHC_RESET_DELAY           = 0x00080000, -        AHC_A_SCANNED             = 0x00100000, -        AHC_B_SCANNED             = 0x00200000, -        AHC_MULTI_CHANNEL         = 0x00400000, -        AHC_BIOS_ENABLED          = 0x00800000, -        AHC_SEEPROM_FOUND         = 0x01000000, -        AHC_TERM_ENB_LVD          = 0x02000000, -        AHC_ABORT_PENDING         = 0x04000000, -        AHC_RESET_PENDING         = 0x08000000, -#define AHC_IN_ISR_BIT              28 -        AHC_IN_ISR                = 0x10000000, -        AHC_IN_ABORT              = 0x20000000, -        AHC_IN_RESET              = 0x40000000, -        AHC_EXTERNAL_SRAM         = 0x80000000 -} ahc_flag_type; - -typedef enum { -  AHC_NONE             = 0x0000, -  AHC_CHIPID_MASK      = 0x00ff, -  AHC_AIC7770          = 0x0001, -  AHC_AIC7850          = 0x0002, -  AHC_AIC7860          = 0x0003, -  AHC_AIC7870          = 0x0004, -  AHC_AIC7880          = 0x0005, -  AHC_AIC7890          = 0x0006, -  AHC_AIC7895          = 0x0007, -  AHC_AIC7896          = 0x0008, -  AHC_AIC7892          = 0x0009, -  AHC_AIC7899          = 0x000a, -  AHC_VL               = 0x0100, -  AHC_EISA             = 0x0200, -  AHC_PCI              = 0x0400, -} ahc_chip; - -typedef enum { -  AHC_FENONE           = 0x0000, -  AHC_ULTRA            = 0x0001, -  AHC_ULTRA2           = 0x0002, -  AHC_WIDE             = 0x0004, -  AHC_TWIN             = 0x0008, -  AHC_MORE_SRAM        = 0x0010, -  AHC_CMD_CHAN         = 0x0020, -  AHC_QUEUE_REGS       = 0x0040, -  AHC_SG_PRELOAD       = 0x0080, -  AHC_SPIOCAP          = 0x0100, -  AHC_ULTRA3           = 0x0200, -  AHC_NEW_AUTOTERM     = 0x0400, -  AHC_AIC7770_FE       = AHC_FENONE, -  AHC_AIC7850_FE       = AHC_SPIOCAP, -  AHC_AIC7860_FE       = AHC_ULTRA|AHC_SPIOCAP, -  AHC_AIC7870_FE       = AHC_FENONE, -  AHC_AIC7880_FE       = AHC_ULTRA, -  AHC_AIC7890_FE       = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA2| -                         AHC_QUEUE_REGS|AHC_SG_PRELOAD|AHC_NEW_AUTOTERM, -  AHC_AIC7895_FE       = AHC_MORE_SRAM|AHC_CMD_CHAN|AHC_ULTRA, -  AHC_AIC7896_FE       = AHC_AIC7890_FE, -  AHC_AIC7892_FE       = AHC_AIC7890_FE|AHC_ULTRA3, -  AHC_AIC7899_FE       = AHC_AIC7890_FE|AHC_ULTRA3, -} ahc_feature; - -#define SCB_DMA_ADDR(scb, addr) ((unsigned long)(addr) + (scb)->scb_dma->dma_offset) - -struct aic7xxx_scb_dma { -	unsigned long	       dma_offset;    /* Correction you have to add -					       * to virtual address to get -					       * dma handle in this region */ -	dma_addr_t	       dma_address;   /* DMA handle of the start, -					       * for unmap */ -	unsigned int	       dma_len;	      /* DMA length */ -}; - -typedef enum { -  AHC_BUG_NONE            = 0x0000, -  AHC_BUG_TMODE_WIDEODD   = 0x0001, -  AHC_BUG_AUTOFLUSH       = 0x0002, -  AHC_BUG_CACHETHEN       = 0x0004, -  AHC_BUG_CACHETHEN_DIS   = 0x0008, -  AHC_BUG_PCI_2_1_RETRY   = 0x0010, -  AHC_BUG_PCI_MWI         = 0x0020, -  AHC_BUG_SCBCHAN_UPLOAD  = 0x0040, -} ahc_bugs; - -struct aic7xxx_scb { -	struct aic7xxx_hwscb	*hscb;		/* corresponding hardware scb */ -	struct scsi_cmnd	*cmd;		/* scsi_cmnd for this scb */ -	struct aic7xxx_scb	*q_next;        /* next scb in queue */ -	volatile scb_flag_type	flags;		/* current state of scb */ -	struct hw_scatterlist	*sg_list;	/* SG list in adapter format */ -	unsigned char		tag_action; -	unsigned char		sg_count; -	unsigned char		*sense_cmd;	/* -						 * Allocate 6 characters for -						 * 
sense command. -						 */ -	unsigned char		*cmnd; -	unsigned int		sg_length;	/* -						 * We init this during -						 * buildscb so we don't have -						 * to calculate anything during -						 * underflow/overflow/stat code -						 */ -	void			*kmalloc_ptr; -	struct aic7xxx_scb_dma	*scb_dma; -}; - -/* - * Define a linked list of SCBs. - */ -typedef struct { -  struct aic7xxx_scb *head; -  struct aic7xxx_scb *tail; -} scb_queue_type; - -static struct { -  unsigned char errno; -  const char *errmesg; -} hard_error[] = { -  { ILLHADDR,  "Illegal Host Access" }, -  { ILLSADDR,  "Illegal Sequencer Address referenced" }, -  { ILLOPCODE, "Illegal Opcode in sequencer program" }, -  { SQPARERR,  "Sequencer Ram Parity Error" }, -  { DPARERR,   "Data-Path Ram Parity Error" }, -  { MPARERR,   "Scratch Ram/SCB Array Ram Parity Error" }, -  { PCIERRSTAT,"PCI Error detected" }, -  { CIOPARERR, "CIOBUS Parity Error" } -}; - -static unsigned char -generic_sense[] = { REQUEST_SENSE, 0, 0, 0, 255, 0 }; - -typedef struct { -  scb_queue_type free_scbs;        /* -                                    * SCBs assigned to free slot on -                                    * card (no paging required) -                                    */ -  struct aic7xxx_scb   *scb_array[AIC7XXX_MAXSCB]; -  struct aic7xxx_hwscb *hscbs; -  unsigned char  numscbs;          /* current number of scbs */ -  unsigned char  maxhscbs;         /* hardware scbs */ -  unsigned char  maxscbs;          /* max scbs including pageable scbs */ -  dma_addr_t	 hscbs_dma;	   /* DMA handle to hscbs */ -  unsigned int   hscbs_dma_len;    /* length of the above DMA area */ -  void          *hscb_kmalloc_ptr; -} scb_data_type; - -struct target_cmd { -  unsigned char mesg_bytes[4]; -  unsigned char command[28]; -}; - -#define AHC_TRANS_CUR    0x0001 -#define AHC_TRANS_ACTIVE 0x0002 -#define AHC_TRANS_GOAL   0x0004 -#define AHC_TRANS_USER   0x0008 -#define AHC_TRANS_QUITE  0x0010 -typedef struct { -  unsigned char width; -  unsigned char period; -  unsigned char offset; -  unsigned char options; -} transinfo_type; - -struct aic_dev_data { -  volatile scb_queue_type  delayed_scbs; -  volatile unsigned short  temp_q_depth; -  unsigned short           max_q_depth; -  volatile unsigned char   active_cmds; -  /* -   * Statistics Kept: -   * -   * Total Xfers (count for each command that has a data xfer), -   * broken down by reads && writes. -   * -   * Further sorted into a few bins for keeping tabs on how many commands -   * we get of various sizes. 
-   * -   */ -  long w_total;                          /* total writes */ -  long r_total;                          /* total reads */ -  long barrier_total;			 /* total num of REQ_BARRIER commands */ -  long ordered_total;			 /* How many REQ_BARRIER commands we -					    used ordered tags to satisfy */ -  long w_bins[6];                       /* binned write */ -  long r_bins[6];                       /* binned reads */ -  transinfo_type	cur; -  transinfo_type	goal; -#define  BUS_DEVICE_RESET_PENDING       0x01 -#define  DEVICE_RESET_DELAY             0x02 -#define  DEVICE_PRINT_DTR               0x04 -#define  DEVICE_WAS_BUSY                0x08 -#define  DEVICE_DTR_SCANNED		0x10 -#define  DEVICE_SCSI_3			0x20 -  volatile unsigned char   flags; -  unsigned needppr:1; -  unsigned needppr_copy:1; -  unsigned needsdtr:1; -  unsigned needsdtr_copy:1; -  unsigned needwdtr:1; -  unsigned needwdtr_copy:1; -  unsigned dtr_pending:1; -  struct scsi_device *SDptr; -  struct list_head list; -}; - -/* - * Define a structure used for each host adapter.  Note, in order to avoid - * problems with architectures I can't test on (because I don't have one, - * such as the Alpha based systems) which happen to give faults for - * non-aligned memory accesses, care was taken to align this structure - * in a way that guaranteed all accesses larger than 8 bits were aligned - * on the appropriate boundary.  It's also organized to try and be more - * cache line efficient.  Be careful when changing this lest you might hurt - * overall performance and bring down the wrath of the masses. - */ -struct aic7xxx_host { -  /* -   *  This is the first 64 bytes in the host struct -   */ - -  /* -   * We are grouping things here....first, items that get either read or -   * written with nearly every interrupt -   */ -	volatile long	flags; -	ahc_feature	features;	/* chip features */ -	unsigned long	base;		/* card base address */ -	volatile unsigned char  __iomem *maddr;	/* memory mapped address */ -	unsigned long	isr_count;	/* Interrupt count */ -	unsigned long	spurious_int; -	scb_data_type	*scb_data; -	struct aic7xxx_cmd_queue { -		struct scsi_cmnd *head; -		struct scsi_cmnd *tail; -	} completeq; - -	/* -	* Things read/written on nearly every entry into aic7xxx_queue() -	*/ -	volatile scb_queue_type	waiting_scbs; -	unsigned char	unpause;	/* unpause value for HCNTRL */ -	unsigned char	pause;		/* pause value for HCNTRL */ -	volatile unsigned char	qoutfifonext; -	volatile unsigned char	activescbs;	/* active scbs */ -	volatile unsigned char	max_activescbs; -	volatile unsigned char	qinfifonext; -	volatile unsigned char	*untagged_scbs; -	volatile unsigned char	*qoutfifo; -	volatile unsigned char	*qinfifo; - -	unsigned char	dev_last_queue_full[MAX_TARGETS]; -	unsigned char	dev_last_queue_full_count[MAX_TARGETS]; -	unsigned short	ultraenb; /* Gets downloaded to card as a bitmap */ -	unsigned short	discenable; /* Gets downloaded to card as a bitmap */ -	transinfo_type	user[MAX_TARGETS]; - -	unsigned char	msg_buf[13];	/* The message for the target */ -	unsigned char	msg_type; -#define MSG_TYPE_NONE              0x00 -#define MSG_TYPE_INITIATOR_MSGOUT  0x01 -#define MSG_TYPE_INITIATOR_MSGIN   0x02 -	unsigned char	msg_len;	/* Length of message */ -	unsigned char	msg_index;	/* Index into msg_buf array */ - - -	/* -	 * We put the less frequently used host structure items -	 * after the more frequently used items to try and ease -	 * the burden on the cache subsystem. 
-	 * These entries are not *commonly* accessed, whereas -	 * the preceding entries are accessed very often. -	 */ - -	unsigned int	irq;		/* IRQ for this adapter */ -	int		instance;	/* aic7xxx instance number */ -	int		scsi_id;	/* host adapter SCSI ID */ -	int		scsi_id_b;	/* channel B for twin adapters */ -	unsigned int	bios_address; -	int		board_name_index; -	unsigned short	bios_control;		/* bios control - SEEPROM */ -	unsigned short	adapter_control;	/* adapter control - SEEPROM */ -	struct pci_dev	*pdev; -	unsigned char	pci_bus; -	unsigned char	pci_device_fn; -	struct seeprom_config	sc; -	unsigned short	sc_type; -	unsigned short	sc_size; -	struct aic7xxx_host	*next;	/* allow for multiple IRQs */ -	struct Scsi_Host	*host;	/* pointer to scsi host */ -	struct list_head	 aic_devs; /* all aic_dev structs on host */ -	int		host_no;	/* SCSI host number */ -	unsigned long	mbase;		/* I/O memory address */ -	ahc_chip	chip;		/* chip type */ -	ahc_bugs	bugs; -	dma_addr_t	fifo_dma;	/* DMA handle for fifo arrays */ -}; - -/* - * Valid SCSIRATE values. (p. 3-17) - * Provides a mapping of transfer periods in ns/4 to the proper value to - * stick in the SCSIRATE reg to use that transfer rate. - */ -#define AHC_SYNCRATE_ULTRA3 0 -#define AHC_SYNCRATE_ULTRA2 1 -#define AHC_SYNCRATE_ULTRA  3 -#define AHC_SYNCRATE_FAST   6 -#define AHC_SYNCRATE_CRC 0x40 -#define AHC_SYNCRATE_SE  0x10 -static struct aic7xxx_syncrate { -  /* Rates in Ultra mode have bit 8 of sxfr set */ -#define                ULTRA_SXFR 0x100 -  int sxfr_ultra2; -  int sxfr; -  unsigned char period; -  const char *rate[2]; -} aic7xxx_syncrates[] = { -  { 0x42,  0x000,   9,  {"80.0", "160.0"} }, -  { 0x13,  0x000,  10,  {"40.0", "80.0"} }, -  { 0x14,  0x000,  11,  {"33.0", "66.6"} }, -  { 0x15,  0x100,  12,  {"20.0", "40.0"} }, -  { 0x16,  0x110,  15,  {"16.0", "32.0"} }, -  { 0x17,  0x120,  18,  {"13.4", "26.8"} }, -  { 0x18,  0x000,  25,  {"10.0", "20.0"} }, -  { 0x19,  0x010,  31,  {"8.0",  "16.0"} }, -  { 0x1a,  0x020,  37,  {"6.67", "13.3"} }, -  { 0x1b,  0x030,  43,  {"5.7",  "11.4"} }, -  { 0x10,  0x040,  50,  {"5.0",  "10.0"} }, -  { 0x00,  0x050,  56,  {"4.4",  "8.8" } }, -  { 0x00,  0x060,  62,  {"4.0",  "8.0" } }, -  { 0x00,  0x070,  68,  {"3.6",  "7.2" } }, -  { 0x00,  0x000,  0,   {NULL, NULL}   }, -}; - -#define CTL_OF_SCB(scb) (((scb->hscb)->target_channel_lun >> 3) & 0x1),  \ -                        (((scb->hscb)->target_channel_lun >> 4) & 0xf), \ -                        ((scb->hscb)->target_channel_lun & 0x07) - -#define CTL_OF_CMD(cmd) ((cmd->device->channel) & 0x01),  \ -                        ((cmd->device->id) & 0x0f), \ -                        ((cmd->device->lun) & 0x07) - -#define TARGET_INDEX(cmd)  ((cmd)->device->id | ((cmd)->device->channel << 3)) - -/* - * A nice little define to make doing our printks a little easier - */ - -#define WARN_LEAD KERN_WARNING "(scsi%d:%d:%d:%d) " -#define INFO_LEAD KERN_INFO "(scsi%d:%d:%d:%d) " - -/* - * XXX - these options apply unilaterally to _all_ 274x/284x/294x - *       cards in the system.  This should be fixed.  Exceptions to this - *       rule are noted in the comments. - */ - -/* - * Use this as the default queue depth when setting tagged queueing on. - */ -static unsigned int aic7xxx_default_queue_depth = AIC7XXX_CMDS_PER_DEVICE; - -/* - * Skip the scsi bus reset.  Non 0 make us skip the reset at startup.  This - * has no effect on any later resets that might occur due to things like - * SCSI bus timeouts. 
- */
-static unsigned int aic7xxx_no_reset = 0;
-/*
- * Certain PCI motherboards will scan PCI devices from highest to lowest,
- * others scan from lowest to highest, and they tend to do all kinds of
- * strange things when they come into contact with PCI bridge chips.  The
- * net result of all this is that the PCI card that is actually used to boot
- * the machine is very hard to detect.  Most motherboards go from lowest
- * PCI slot number to highest, and the first SCSI controller found is the
- * one you boot from.  The only exceptions to this are when a controller
- * has its BIOS disabled.  So, we by default sort all of our SCSI controllers
- * from lowest PCI slot number to highest PCI slot number.  We also force
- * all controllers with their BIOS disabled to the end of the list.  This
- * works on *almost* all computers.  Where it doesn't work, we have this
- * option.  Setting this option to non-0 will reverse the order of the sort
- * to highest first, then lowest, but will still leave cards with their BIOS
- * disabled at the very end.  That should fix everyone up unless there are
- * really strange circumstances.
- */
-static int aic7xxx_reverse_scan = 0;
-/*
- * Should we force EXTENDED translation on a controller.
- *     0 == Use whatever is in the SEEPROM or default to off
- *     1 == Use whatever is in the SEEPROM or default to on
- */
-static unsigned int aic7xxx_extended = 0;
-/*
- * The IRQ trigger method used on EISA controllers. Does not affect PCI cards.
- *   -1 = Use detected settings.
- *    0 = Force Edge triggered mode.
- *    1 = Force Level triggered mode.
- */
-static int aic7xxx_irq_trigger = -1;
-/*
- * This variable is used to override the termination settings on a controller.
- * This should not be used under normal conditions.  However, in the case
- * that a controller does not have a readable SEEPROM (so that we can't
- * read the SEEPROM settings directly) and has a buggered
- * version of the cable detection logic, this can be used to force the
- * correct termination.  It is preferable to use the manual termination
- * settings in the BIOS if possible, but some motherboard controllers store
- * those settings in a format we can't read.  In other cases, auto term
- * should also work, but the chipset was put together with no auto term
- * logic (common on motherboard controllers).  In those cases, we have
- * 32 bits here to work with.  That's good for 8 controllers/channels.  The
- * bits are organized as 4 bits per channel, with scsi0 getting the lowest
- * 4 bits in the int.  A 1 in a bit position indicates the termination setting
- * that corresponds to that bit should be enabled, a 0 is disabled.
- * It looks something like this:
- *
- *    0x0f =  1111-Single Ended Low Byte Termination on/off
- *            ||\-Single Ended High Byte Termination on/off
- *            |\-LVD Low Byte Termination on/off
- *            \-LVD High Byte Termination on/off
- *
- * For non-Ultra2 controllers, the upper 2 bits are not important.  So, to
- * enable both high byte and low byte termination on scsi0, I would need to
- * make sure that the override_term variable was set to 0x03 (bits 0011).
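To make the nibble layout above concrete, here is a hypothetical decode; channel_term_bits is not part of the driver, it just restates the documented convention of four bits per channel with scsi0 in the lowest nibble.

/* Hypothetical: pull out the 4 termination-override bits for a given
 * channel; bit 0 = SE low byte, bit 1 = SE high byte, bit 2 = LVD low
 * byte, bit 3 = LVD high byte (the upper two matter on Ultra2 only). */
static int channel_term_bits(int override_term, int channel)
{
  return (override_term >> (channel * 4)) & 0x0f;
}

With the override_term=0xf23 example that follows, this yields 0x3 for scsi0, 0x2 for scsi1, and 0xf for scsi2.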
- * To make sure that all termination is enabled on an Ultra2 controller at
- * scsi2 and only high byte termination on scsi1 and high and low byte
- * termination on scsi0, I would set override_term=0xf23 (bits 1111 0010 0011)
- *
- * For the most part, users should never have to use this, that's why I
- * left it fairly cryptic instead of easy to understand.  If you need it,
- * most likely someone will be telling you what yours needs to be set to.
- */
-static int aic7xxx_override_term = -1;
-/*
- * Certain motherboard chipset controllers tend to screw
- * up the polarity of the term enable output pin.  Use this variable
- * to force the correct polarity for your system.  This is a bitfield variable
- * similar to the previous one, but this one has one bit per channel instead
- * of four.
- *    0 = Force the setting to active low.
- *    1 = Force setting to active high.
- * Most Adaptec cards are active high, several motherboards are active low.
- * To force a 2940 card at SCSI 0 to active high and a motherboard 7895
- * controller at scsi1 and scsi2 to active low, and a 2910 card at scsi3
- * to active high, you would need to set stpwlev=0x9 (bits 1001).
- *
- * People shouldn't need to use this, but if you are experiencing lots of
- * SCSI timeout problems, this may help.  There is one sure way to test what
- * this option needs to be.  Using a boot floppy to boot the system, configure
- * your system to enable all SCSI termination (in the Adaptec SCSI BIOS) and
- * if needed then also pass a value to override_term to make sure that the
- * driver is enabling SCSI termination, then set this variable to either 0
- * or 1.  When the driver boots, make sure there are *NO* SCSI cables
- * connected to your controller.  If it finds and inits the controller
- * without problem, then the setting you passed to stpwlev was correct.  If
- * the driver goes into a reset loop and hangs the system, then you need the
- * other setting for this variable.  If neither setting lets the machine
- * boot then you have definite termination problems that may not be fixable.
- */
-static int aic7xxx_stpwlev = -1;
-/*
- * Set this to non-0 in order to force the driver to panic the kernel
- * and print out debugging info on a SCSI abort or reset cycle.
- */
-static int aic7xxx_panic_on_abort = 0;
-/*
- * PCI bus parity checking of the Adaptec controllers.  This is somewhat
- * dubious at best.  To my knowledge, this option has never actually
- * solved a PCI parity problem, but on certain machines with broken PCI
- * chipset configurations, it can generate tons of false error messages.
- * It's included in the driver for completeness.
- *   0 = Shut off PCI parity check
- *  -1 = Normal polarity pci parity checking
- *   1 = reverse polarity pci parity checking
- *
- * NOTE: you can't actually pass -1 on the lilo prompt.  So, to set this
- * variable to -1 you would actually want to simply pass the variable
- * name without a number.  That will invert the 0 which will result in
- * -1.
- */
-static int aic7xxx_pci_parity = 0;
-/*
- * Set this to any non-0 value to cause us to dump the contents of all
- * the card's registers in a hex dump format tailored to each model of
- * controller.
- *
- * NOTE: THE CONTROLLER IS LEFT IN AN UNUSABLE STATE BY THIS OPTION.
- *       YOU CANNOT BOOT UP WITH THIS OPTION, IT IS FOR DEBUGGING PURPOSES
- *       ONLY
- */
-static int aic7xxx_dump_card = 0;
-/*
- * Set this to a non-0 value to make us dump out the 32 bit instruction
- * registers on the card after completing the sequencer download.  This
- * allows the actual sequencer download to be verified.  It is possible
- * to use this option and still boot up and run your system.  This is
- * only intended for debugging purposes.
- */
-static int aic7xxx_dump_sequencer = 0;
-/*
- * Certain newer motherboards have put new PCI based devices into the
- * IO spaces that used to typically be occupied by VLB or EISA cards.
- * This overlap can cause these newer motherboards to lock up when scanned
- * for older EISA and VLB devices.  Setting this option to non-0 will
- * cause the driver to skip scanning for any VLB or EISA controllers and
- * only support the PCI controllers.  NOTE: this means that if the kernel
- * is compiled with PCI support disabled, then setting this to non-0
- * would result in never finding any devices :)
- */
-static int aic7xxx_no_probe = 0;
-/*
- * On some machines, enabling the external SCB RAM isn't reliable yet.  I
- * haven't had time to make test patches for things like changing the
- * timing mode on that external RAM either.  Some of those changes may
- * fix the problem.  Until then though, we default to external SCB RAM
- * off and give a command line option to enable it.
- */
-static int aic7xxx_scbram = 0;
-/*
- * So that we can set how long each device is given as a selection timeout.
- * The table of values goes like this:
- *   0 - 256ms
- *   1 - 128ms
- *   2 - 64ms
- *   3 - 32ms
- * We default to 64ms because it's fast.  Some old SCSI-I devices need a
- * longer time.  The final value has to be left shifted by 3, hence 0x10
- * is the final value.
- */
-static int aic7xxx_seltime = 0x10;
-/*
- * So that insmod can find the variable and make it point to something
- */
-#ifdef MODULE
-static char * aic7xxx = NULL;
-module_param(aic7xxx, charp, 0);
-#endif
-
-#define VERBOSE_NORMAL         0x0000
-#define VERBOSE_NEGOTIATION    0x0001
-#define VERBOSE_SEQINT         0x0002
-#define VERBOSE_SCSIINT        0x0004
-#define VERBOSE_PROBE          0x0008
-#define VERBOSE_PROBE2         0x0010
-#define VERBOSE_NEGOTIATION2   0x0020
-#define VERBOSE_MINOR_ERROR    0x0040
-#define VERBOSE_TRACING        0x0080
-#define VERBOSE_ABORT          0x0f00
-#define VERBOSE_ABORT_MID      0x0100
-#define VERBOSE_ABORT_FIND     0x0200
-#define VERBOSE_ABORT_PROCESS  0x0400
-#define VERBOSE_ABORT_RETURN   0x0800
-#define VERBOSE_RESET          0xf000
-#define VERBOSE_RESET_MID      0x1000
-#define VERBOSE_RESET_FIND     0x2000
-#define VERBOSE_RESET_PROCESS  0x4000
-#define VERBOSE_RESET_RETURN   0x8000
-static int aic7xxx_verbose = VERBOSE_NORMAL | VERBOSE_NEGOTIATION |
-           VERBOSE_PROBE;                     /* verbose messages */
-
-
-/****************************************************************************
- *
- * We're going to start putting in function declarations so that order of
- * functions is no longer important.  As needed, they are added here.
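One small worked example for the seltime encoding documented above: the boot value is masked to 0-3 and shifted left by 3, which is also what aic7xxx_setup() does to the option further down. seltime_to_reg is a hypothetical name used only for illustration.

/* Hypothetical: 0=256ms, 1=128ms, 2=64ms, 3=32ms; the register wants
 * the value shifted left by 3, so the 64ms default of 2 encodes as
 * (2 % 4) << 3 == 0x10, matching aic7xxx_seltime above. */
static int seltime_to_reg(int seltime)
{
  return (seltime % 4) << 3;
}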
- * - ***************************************************************************/ - -static int aic7xxx_release(struct Scsi_Host *host); -static void aic7xxx_set_syncrate(struct aic7xxx_host *p,  -		struct aic7xxx_syncrate *syncrate, int target, int channel, -		unsigned int period, unsigned int offset, unsigned char options, -		unsigned int type, struct aic_dev_data *aic_dev); -static void aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, -		int lun, unsigned int width, unsigned int type, -		struct aic_dev_data *aic_dev); -static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd); -static void aic7xxx_print_card(struct aic7xxx_host *p); -static void aic7xxx_print_scratch_ram(struct aic7xxx_host *p); -static void aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded); -#ifdef AIC7XXX_VERBOSE_DEBUGGING -static void aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer); -#endif - -/**************************************************************************** - * - * These functions are now used.  They happen to be wrapped in useless - * inb/outb port read/writes around the real reads and writes because it - * seems that certain very fast CPUs have a problem dealing with us when - * going at full speed. - * - ***************************************************************************/ - -static unsigned char -aic_inb(struct aic7xxx_host *p, long port) -{ -#ifdef MMAPIO -  unsigned char x; -  if(p->maddr) -  { -    x = readb(p->maddr + port); -  } -  else -  { -    x = inb(p->base + port); -  } -  return(x); -#else -  return(inb(p->base + port)); -#endif -} - -static void -aic_outb(struct aic7xxx_host *p, unsigned char val, long port) -{ -#ifdef MMAPIO -  if(p->maddr) -  { -    writeb(val, p->maddr + port); -    mb(); /* locked operation in order to force CPU ordering */ -    readb(p->maddr + HCNTRL); /* dummy read to flush the PCI write */ -  } -  else -  { -    outb(val, p->base + port); -    mb(); /* locked operation in order to force CPU ordering */ -  } -#else -  outb(val, p->base + port); -  mb(); /* locked operation in order to force CPU ordering */ -#endif -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_setup - * - * Description: - *   Handle Linux boot parameters. This routine allows for assigning a value - *   to a parameter with a ':' between the parameter and the value. - *   ie. 
aic7xxx=unpause:0x0A,extended - *-F*************************************************************************/ -static int -aic7xxx_setup(char *s) -{ -  int   i, n; -  char *p; -  char *end; - -  static struct { -    const char *name; -    unsigned int *flag; -  } options[] = { -    { "extended",    &aic7xxx_extended }, -    { "no_reset",    &aic7xxx_no_reset }, -    { "irq_trigger", &aic7xxx_irq_trigger }, -    { "verbose",     &aic7xxx_verbose }, -    { "reverse_scan",&aic7xxx_reverse_scan }, -    { "override_term", &aic7xxx_override_term }, -    { "stpwlev", &aic7xxx_stpwlev }, -    { "no_probe", &aic7xxx_no_probe }, -    { "panic_on_abort", &aic7xxx_panic_on_abort }, -    { "pci_parity", &aic7xxx_pci_parity }, -    { "dump_card", &aic7xxx_dump_card }, -    { "dump_sequencer", &aic7xxx_dump_sequencer }, -    { "default_queue_depth", &aic7xxx_default_queue_depth }, -    { "scbram", &aic7xxx_scbram }, -    { "seltime", &aic7xxx_seltime }, -    { "tag_info",    NULL } -  }; - -  end = strchr(s, '\0'); - -  while ((p = strsep(&s, ",.")) != NULL) -  { -    for (i = 0; i < ARRAY_SIZE(options); i++) -    { -      n = strlen(options[i].name); -      if (!strncmp(options[i].name, p, n)) -      { -        if (!strncmp(p, "tag_info", n)) -        { -          if (p[n] == ':') -          { -            char *base; -            char *tok, *tok_end, *tok_end2; -            char tok_list[] = { '.', ',', '{', '}', '\0' }; -            int i, instance = -1, device = -1; -            unsigned char done = FALSE; - -            base = p; -            tok = base + n + 1;  /* Forward us just past the ':' */ -            tok_end = strchr(tok, '\0'); -            if (tok_end < end) -              *tok_end = ','; -            while(!done) -            { -              switch(*tok) -              { -                case '{': -                  if (instance == -1) -                    instance = 0; -                  else if (device == -1) -                    device = 0; -                  tok++; -                  break; -                case '}': -                  if (device != -1) -                    device = -1; -                  else if (instance != -1) -                    instance = -1; -                  tok++; -                  break; -                case ',': -                case '.': -                  if (instance == -1) -                    done = TRUE; -                  else if (device >= 0) -                    device++; -                  else if (instance >= 0) -                    instance++; -                  if ( (device >= MAX_TARGETS) ||  -                       (instance >= ARRAY_SIZE(aic7xxx_tag_info)) ) -                    done = TRUE; -                  tok++; -                  if (!done) -                  { -                    base = tok; -                  } -                  break; -                case '\0': -                  done = TRUE; -                  break; -                default: -                  done = TRUE; -                  tok_end = strchr(tok, '\0'); -                  for(i=0; tok_list[i]; i++) -                  { -                    tok_end2 = strchr(tok, tok_list[i]); -                    if ( (tok_end2) && (tok_end2 < tok_end) ) -                    { -                      tok_end = tok_end2; -                      done = FALSE; -                    } -                  } -                  if ( (instance >= 0) && (device >= 0) && -                       (instance < ARRAY_SIZE(aic7xxx_tag_info)) && -                       (device < MAX_TARGETS) 
) -                    aic7xxx_tag_info[instance].tag_commands[device] = -                      simple_strtoul(tok, NULL, 0) & 0xff; -                  tok = tok_end; -                  break; -              } -            } -            while((p != base) && (p != NULL)) -              p = strsep(&s, ",."); -          } -        } -        else if (p[n] == ':') -        { -          *(options[i].flag) = simple_strtoul(p + n + 1, NULL, 0); -          if(!strncmp(p, "seltime", n)) -          { -            *(options[i].flag) = (*(options[i].flag) % 4) << 3; -          } -        } -        else if (!strncmp(p, "verbose", n)) -        { -          *(options[i].flag) = 0xff29; -        } -        else -        { -          *(options[i].flag) = ~(*(options[i].flag)); -          if(!strncmp(p, "seltime", n)) -          { -            *(options[i].flag) = (*(options[i].flag) % 4) << 3; -          } -        } -      } -    } -  } -  return 1; -} - -__setup("aic7xxx=", aic7xxx_setup); - -/*+F************************************************************************* - * Function: - *   pause_sequencer - * - * Description: - *   Pause the sequencer and wait for it to actually stop - this - *   is important since the sequencer can disable pausing for critical - *   sections. - *-F*************************************************************************/ -static void -pause_sequencer(struct aic7xxx_host *p) -{ -  aic_outb(p, p->pause, HCNTRL); -  while ((aic_inb(p, HCNTRL) & PAUSE) == 0) -  { -    ; -  } -  if(p->features & AHC_ULTRA2) -  { -    aic_inb(p, CCSCBCTL); -  } -} - -/*+F************************************************************************* - * Function: - *   unpause_sequencer - * - * Description: - *   Unpause the sequencer. Unremarkable, yet done often enough to - *   warrant an easy way to do it. - *-F*************************************************************************/ -static void -unpause_sequencer(struct aic7xxx_host *p, int unpause_always) -{ -  if (unpause_always || -      ( !(aic_inb(p, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) && -        !(p->flags & AHC_HANDLING_REQINITS) ) ) -  { -    aic_outb(p, p->unpause, HCNTRL); -  } -} - -/*+F************************************************************************* - * Function: - *   restart_sequencer - * - * Description: - *   Restart the sequencer program from address zero.  This assumes - *   that the sequencer is already paused. - *-F*************************************************************************/ -static void -restart_sequencer(struct aic7xxx_host *p) -{ -  aic_outb(p, 0, SEQADDR0); -  aic_outb(p, 0, SEQADDR1); -  aic_outb(p, FASTMODE, SEQCTL); -} - -/* - * We include the aic7xxx_seq.c file here so that the other defines have - * already been made, and so that it comes before the code that actually - * downloads the instructions (since we don't typically use function - * prototype, our code has to be ordered that way, it's a left-over from - * the original driver days.....I should fix it some time DL). - */ -#include "aic7xxx_old/aic7xxx_seq.c" - -/*+F************************************************************************* - * Function: - *   aic7xxx_check_patch - * - * Description: - *   See if the next patch to download should be downloaded. 
- *-F*************************************************************************/ -static int -aic7xxx_check_patch(struct aic7xxx_host *p, -  struct sequencer_patch **start_patch, int start_instr, int *skip_addr) -{ -  struct sequencer_patch *cur_patch; -  struct sequencer_patch *last_patch; -  int num_patches; - -  num_patches = ARRAY_SIZE(sequencer_patches); -  last_patch = &sequencer_patches[num_patches]; -  cur_patch = *start_patch; - -  while ((cur_patch < last_patch) && (start_instr == cur_patch->begin)) -  { -    if (cur_patch->patch_func(p) == 0) -    { -      /* -       * Start rejecting code. -       */ -      *skip_addr = start_instr + cur_patch->skip_instr; -      cur_patch += cur_patch->skip_patch; -    } -    else -    { -      /* -       * Found an OK patch.  Advance the patch pointer to the next patch -       * and wait for our instruction pointer to get here. -       */ -      cur_patch++; -    } -  } - -  *start_patch = cur_patch; -  if (start_instr < *skip_addr) -    /* -     * Still skipping -     */ -    return (0); -  return(1); -} - - -/*+F************************************************************************* - * Function: - *   aic7xxx_download_instr - * - * Description: - *   Find the next patch to download. - *-F*************************************************************************/ -static void -aic7xxx_download_instr(struct aic7xxx_host *p, int instrptr, -  unsigned char *dconsts) -{ -  union ins_formats instr; -  struct ins_format1 *fmt1_ins; -  struct ins_format3 *fmt3_ins; -  unsigned char opcode; - -  instr = *(union ins_formats*) &seqprog[instrptr * 4]; - -  instr.integer = le32_to_cpu(instr.integer); -   -  fmt1_ins = &instr.format1; -  fmt3_ins = NULL; - -  /* Pull the opcode */ -  opcode = instr.format1.opcode; -  switch (opcode) -  { -    case AIC_OP_JMP: -    case AIC_OP_JC: -    case AIC_OP_JNC: -    case AIC_OP_CALL: -    case AIC_OP_JNE: -    case AIC_OP_JNZ: -    case AIC_OP_JE: -    case AIC_OP_JZ: -    { -      struct sequencer_patch *cur_patch; -      int address_offset; -      unsigned int address; -      int skip_addr; -      int i; - -      fmt3_ins = &instr.format3; -      address_offset = 0; -      address = fmt3_ins->address; -      cur_patch = sequencer_patches; -      skip_addr = 0; - -      for (i = 0; i < address;) -      { -        aic7xxx_check_patch(p, &cur_patch, i, &skip_addr); -        if (skip_addr > i) -        { -          int end_addr; - -          end_addr = min_t(int, address, skip_addr); -          address_offset += end_addr - i; -          i = skip_addr; -        } -        else -        { -          i++; -        } -      } -      address -= address_offset; -      fmt3_ins->address = address; -      /* Fall Through to the next code section */ -    } -    case AIC_OP_OR: -    case AIC_OP_AND: -    case AIC_OP_XOR: -    case AIC_OP_ADD: -    case AIC_OP_ADC: -    case AIC_OP_BMOV: -      if (fmt1_ins->parity != 0) -      { -        fmt1_ins->immediate = dconsts[fmt1_ins->immediate]; -      } -      fmt1_ins->parity = 0; -      /* Fall Through to the next code section */ -    case AIC_OP_ROL: -      if ((p->features & AHC_ULTRA2) != 0) -      { -        int i, count; - -        /* Calculate odd parity for the instruction */ -        for ( i=0, count=0; i < 31; i++) -        { -          unsigned int mask; - -          mask = 0x01 << i; -          if ((instr.integer & mask) != 0) -            count++; -        } -        if (!(count & 0x01)) -          instr.format1.parity = 1; -      } -      else -      { -        if 
(fmt3_ins != NULL) -        { -          instr.integer =  fmt3_ins->immediate | -                          (fmt3_ins->source << 8) | -                          (fmt3_ins->address << 16) | -                          (fmt3_ins->opcode << 25); -        } -        else -        { -          instr.integer =  fmt1_ins->immediate | -                          (fmt1_ins->source << 8) | -                          (fmt1_ins->destination << 16) | -                          (fmt1_ins->ret << 24) | -                          (fmt1_ins->opcode << 25); -        } -      } -      aic_outb(p, (instr.integer & 0xff), SEQRAM); -      aic_outb(p, ((instr.integer >> 8) & 0xff), SEQRAM); -      aic_outb(p, ((instr.integer >> 16) & 0xff), SEQRAM); -      aic_outb(p, ((instr.integer >> 24) & 0xff), SEQRAM); -      udelay(10); -      break; - -    default: -      panic("aic7xxx: Unknown opcode encountered in sequencer program."); -      break; -  } -} - - -/*+F************************************************************************* - * Function: - *   aic7xxx_loadseq - * - * Description: - *   Load the sequencer code into the controller memory. - *-F*************************************************************************/ -static void -aic7xxx_loadseq(struct aic7xxx_host *p) -{ -  struct sequencer_patch *cur_patch; -  int i; -  int downloaded; -  int skip_addr; -  unsigned char download_consts[4] = {0, 0, 0, 0}; - -  if (aic7xxx_verbose & VERBOSE_PROBE) -  { -    printk(KERN_INFO "(scsi%d) Downloading sequencer code...", p->host_no); -  } -#if 0 -  download_consts[TMODE_NUMCMDS] = p->num_targetcmds; -#endif -  download_consts[TMODE_NUMCMDS] = 0; -  cur_patch = &sequencer_patches[0]; -  downloaded = 0; -  skip_addr = 0; - -  aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL); -  aic_outb(p, 0, SEQADDR0); -  aic_outb(p, 0, SEQADDR1); - -  for (i = 0; i < sizeof(seqprog) / 4;  i++) -  { -    if (aic7xxx_check_patch(p, &cur_patch, i, &skip_addr) == 0) -    { -      /* Skip this instruction for this configuration. */ -      continue; -    } -    aic7xxx_download_instr(p, i, &download_consts[0]); -    downloaded++; -  } - -  aic_outb(p, 0, SEQADDR0); -  aic_outb(p, 0, SEQADDR1); -  aic_outb(p, FASTMODE | FAILDIS, SEQCTL); -  unpause_sequencer(p, TRUE); -  mdelay(1); -  pause_sequencer(p); -  aic_outb(p, FASTMODE, SEQCTL); -  if (aic7xxx_verbose & VERBOSE_PROBE) -  { -    printk(" %d instructions downloaded\n", downloaded); -  } -  if (aic7xxx_dump_sequencer) -    aic7xxx_print_sequencer(p, downloaded); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_print_sequencer - * - * Description: - *   Print the contents of the sequencer memory to the screen. 
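Each surviving instruction is pushed into sequencer RAM one byte at a time, least significant byte first. A standalone sketch of that split, with an illustrative output callback in place of the driver's aic_outb():

#include <stdint.h>

/* Emit one 32-bit sequencer word LSB-first, as the download loop does
 * with its four consecutive SEQRAM writes. */
static void emit_seqram_word(uint32_t instr,
                             void (*outb)(uint8_t val, int reg),
                             int seqram_reg)
{
        outb(instr & 0xff, seqram_reg);
        outb((instr >> 8) & 0xff, seqram_reg);
        outb((instr >> 16) & 0xff, seqram_reg);
        outb((instr >> 24) & 0xff, seqram_reg);
}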
- *-F*************************************************************************/ -static void -aic7xxx_print_sequencer(struct aic7xxx_host *p, int downloaded) -{ -  int i, k, temp; -   -  aic_outb(p, PERRORDIS|LOADRAM|FAILDIS|FASTMODE, SEQCTL); -  aic_outb(p, 0, SEQADDR0); -  aic_outb(p, 0, SEQADDR1); - -  k = 0; -  for (i=0; i < downloaded; i++) -  { -    if ( k == 0 ) -      printk("%03x: ", i); -    temp = aic_inb(p, SEQRAM); -    temp |= (aic_inb(p, SEQRAM) << 8); -    temp |= (aic_inb(p, SEQRAM) << 16); -    temp |= (aic_inb(p, SEQRAM) << 24); -    printk("%08x", temp); -    if ( ++k == 8 ) -    { -      printk("\n"); -      k = 0; -    } -    else -      printk(" "); -  } -  aic_outb(p, 0, SEQADDR0); -  aic_outb(p, 0, SEQADDR1); -  aic_outb(p, FASTMODE | FAILDIS, SEQCTL); -  unpause_sequencer(p, TRUE); -  mdelay(1); -  pause_sequencer(p); -  aic_outb(p, FASTMODE, SEQCTL); -  printk("\n"); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_info - * - * Description: - *   Return a string describing the driver. - *-F*************************************************************************/ -static const char * -aic7xxx_info(struct Scsi_Host *dooh) -{ -  static char buffer[256]; -  char *bp; -  struct aic7xxx_host *p; - -  bp = &buffer[0]; -  p = (struct aic7xxx_host *)dooh->hostdata; -  memset(bp, 0, sizeof(buffer)); -  strcpy(bp, "Adaptec AHA274x/284x/294x (EISA/VLB/PCI-Fast SCSI) "); -  strcat(bp, AIC7XXX_C_VERSION); -  strcat(bp, "/"); -  strcat(bp, AIC7XXX_H_VERSION); -  strcat(bp, "\n"); -  strcat(bp, "       <"); -  strcat(bp, board_names[p->board_name_index]); -  strcat(bp, ">"); - -  return(bp); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_find_syncrate - * - * Description: - *   Look up the valid period to SCSIRATE conversion in our table - *-F*************************************************************************/ -static struct aic7xxx_syncrate * -aic7xxx_find_syncrate(struct aic7xxx_host *p, unsigned int *period, -  unsigned int maxsync, unsigned char *options) -{ -  struct aic7xxx_syncrate *syncrate; -  int done = FALSE; - -  switch(*options) -  { -    case MSG_EXT_PPR_OPTION_DT_CRC: -    case MSG_EXT_PPR_OPTION_DT_UNITS: -      if(!(p->features & AHC_ULTRA3)) -      { -        *options = 0; -        maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2); -      } -      break; -    case MSG_EXT_PPR_OPTION_DT_CRC_QUICK: -    case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK: -      if(!(p->features & AHC_ULTRA3)) -      { -        *options = 0; -        maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2); -      } -      else -      { -        /* -         * we don't support the Quick Arbitration variants of dual edge -         * clocking.  As it turns out, we want to send back the -         * same basic option, but without the QA attribute. -         * We know that we are responding because we would never set -         * these options ourself, we would only respond to them. 
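The Quick Arbitration handling reduces to a small mapping: answer each QA variant with its plain dual-edge counterpart. A sketch with stand-in enumerators; the real MSG_EXT_PPR_OPTION_* codes come from the SCSI message headers:

enum ppr_opt {                          /* illustrative values only */
        PPR_DT_CRC,
        PPR_DT_UNITS,
        PPR_DT_CRC_QUICK,
        PPR_DT_UNITS_QUICK,
};

/* Strip the Quick Arbitration attribute but keep the basic dual-edge
 * option, as the response path described above does. */
static enum ppr_opt strip_quick_arb(enum ppr_opt opt)
{
        switch (opt) {
        case PPR_DT_CRC_QUICK:   return PPR_DT_CRC;
        case PPR_DT_UNITS_QUICK: return PPR_DT_UNITS;
        default:                 return opt;
        }
}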
-         */ -        switch(*options) -        { -          case MSG_EXT_PPR_OPTION_DT_CRC_QUICK: -            *options = MSG_EXT_PPR_OPTION_DT_CRC; -            break; -          case MSG_EXT_PPR_OPTION_DT_UNITS_QUICK: -            *options = MSG_EXT_PPR_OPTION_DT_UNITS; -            break; -        } -      } -      break; -    default: -      *options = 0; -      maxsync = max_t(unsigned int, maxsync, AHC_SYNCRATE_ULTRA2); -      break; -  } -  syncrate = &aic7xxx_syncrates[maxsync]; -  while ( (syncrate->rate[0] != NULL) && -         (!(p->features & AHC_ULTRA2) || syncrate->sxfr_ultra2) ) -  { -    if (*period <= syncrate->period)  -    { -      switch(*options) -      { -        case MSG_EXT_PPR_OPTION_DT_CRC: -        case MSG_EXT_PPR_OPTION_DT_UNITS: -          if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC)) -          { -            done = TRUE; -            /* -             * oops, we went too low for the CRC/DualEdge signalling, so -             * clear the options byte -             */ -            *options = 0; -            /* -             * We'll be sending a reply to this packet to set the options -             * properly, so unilaterally set the period as well. -             */ -            *period = syncrate->period; -          } -          else -          { -            done = TRUE; -            if(syncrate == &aic7xxx_syncrates[maxsync]) -            { -              *period = syncrate->period; -            } -          } -          break; -        default: -          if(!(syncrate->sxfr_ultra2 & AHC_SYNCRATE_CRC)) -          { -            done = TRUE; -            if(syncrate == &aic7xxx_syncrates[maxsync]) -            { -              *period = syncrate->period; -            } -          } -          break; -      } -      if(done) -      { -        break; -      } -    } -    syncrate++; -  } -  if ( (*period == 0) || (syncrate->rate[0] == NULL) || -       ((p->features & AHC_ULTRA2) && (syncrate->sxfr_ultra2 == 0)) ) -  { -    /* -     * Use async transfers for this target -     */ -    *options = 0; -    *period = 255; -    syncrate = NULL; -  } -  return (syncrate); -} - - -/*+F************************************************************************* - * Function: - *   aic7xxx_find_period - * - * Description: - *   Look up the valid SCSIRATE to period conversion in our table - *-F*************************************************************************/ -static unsigned int -aic7xxx_find_period(struct aic7xxx_host *p, unsigned int scsirate, -  unsigned int maxsync) -{ -  struct aic7xxx_syncrate *syncrate; - -  if (p->features & AHC_ULTRA2) -  { -    scsirate &= SXFR_ULTRA2; -  } -  else -  { -    scsirate &= SXFR; -  } - -  syncrate = &aic7xxx_syncrates[maxsync]; -  while (syncrate->rate[0] != NULL) -  { -    if (p->features & AHC_ULTRA2) -    { -      if (syncrate->sxfr_ultra2 == 0) -        break; -      else if (scsirate == syncrate->sxfr_ultra2) -        return (syncrate->period); -      else if (scsirate == (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC)) -        return (syncrate->period); -    } -    else if (scsirate == (syncrate->sxfr & ~ULTRA_SXFR)) -    { -      return (syncrate->period); -    } -    syncrate++; -  } -  return (0); /* async */ -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_validate_offset - * - * Description: - *   Set a valid offset value for a particular card in use and transfer - *   settings in use. 
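The validation itself is a clamp against whichever maximum applies. A standalone sketch; the numeric caps are stand-ins for the driver's MAX_OFFSET_* constants:

/* Clamp a requested sync offset to what the hardware can take.  The
 * limits below are placeholders, not the driver's real values. */
static unsigned int clamp_offset(unsigned int requested, int ultra2, int wide)
{
        unsigned int max;

        if (ultra2)
                max = 0x7f;               /* stand-in for MAX_OFFSET_ULTRA2 */
        else
                max = wide ? 0x08 : 0x0f; /* stand-ins for 16/8-bit caps */
        return requested < max ? requested : max;
}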
- *-F*************************************************************************/ -static void -aic7xxx_validate_offset(struct aic7xxx_host *p, -  struct aic7xxx_syncrate *syncrate, unsigned int *offset, int wide) -{ -  unsigned int maxoffset; - -  /* Limit offset to what the card (and device) can do */ -  if (syncrate == NULL) -  { -    maxoffset = 0; -  } -  else if (p->features & AHC_ULTRA2) -  { -    maxoffset = MAX_OFFSET_ULTRA2; -  } -  else -  { -    if (wide) -      maxoffset = MAX_OFFSET_16BIT; -    else -      maxoffset = MAX_OFFSET_8BIT; -  } -  *offset = min(*offset, maxoffset); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_set_syncrate - * - * Description: - *   Set the actual syncrate down in the card and in our host structs - *-F*************************************************************************/ -static void -aic7xxx_set_syncrate(struct aic7xxx_host *p, struct aic7xxx_syncrate *syncrate, -    int target, int channel, unsigned int period, unsigned int offset, -    unsigned char options, unsigned int type, struct aic_dev_data *aic_dev) -{ -  unsigned char tindex; -  unsigned short target_mask; -  unsigned char lun, old_options; -  unsigned int old_period, old_offset; - -  tindex = target | (channel << 3); -  target_mask = 0x01 << tindex; -  lun = aic_inb(p, SCB_TCL) & 0x07; - -  if (syncrate == NULL) -  { -    period = 0; -    offset = 0; -  } - -  old_period = aic_dev->cur.period; -  old_offset = aic_dev->cur.offset; -  old_options = aic_dev->cur.options; - -   -  if (type & AHC_TRANS_CUR) -  { -    unsigned int scsirate; - -    scsirate = aic_inb(p, TARG_SCSIRATE + tindex); -    if (p->features & AHC_ULTRA2) -    { -      scsirate &= ~SXFR_ULTRA2; -      if (syncrate != NULL) -      { -        switch(options) -        { -          case MSG_EXT_PPR_OPTION_DT_UNITS: -            /* -             * mask off the CRC bit in the xfer settings -             */ -            scsirate |= (syncrate->sxfr_ultra2 & ~AHC_SYNCRATE_CRC); -            break; -          default: -            scsirate |= syncrate->sxfr_ultra2; -            break; -        } -      } -      if (type & AHC_TRANS_ACTIVE) -      { -        aic_outb(p, offset, SCSIOFFSET); -      } -      aic_outb(p, offset, TARG_OFFSET + tindex); -    } -    else /* Not an Ultra2 controller */ -    { -      scsirate &= ~(SXFR|SOFS); -      p->ultraenb &= ~target_mask; -      if (syncrate != NULL) -      { -        if (syncrate->sxfr & ULTRA_SXFR) -        { -          p->ultraenb |= target_mask; -        } -        scsirate |= (syncrate->sxfr & SXFR); -        scsirate |= (offset & SOFS); -      } -      if (type & AHC_TRANS_ACTIVE) -      { -        unsigned char sxfrctl0; - -        sxfrctl0 = aic_inb(p, SXFRCTL0); -        sxfrctl0 &= ~FAST20; -        if (p->ultraenb & target_mask) -          sxfrctl0 |= FAST20; -        aic_outb(p, sxfrctl0, SXFRCTL0); -      } -      aic_outb(p, p->ultraenb & 0xff, ULTRA_ENB); -      aic_outb(p, (p->ultraenb >> 8) & 0xff, ULTRA_ENB + 1 ); -    } -    if (type & AHC_TRANS_ACTIVE) -    { -      aic_outb(p, scsirate, SCSIRATE); -    } -    aic_outb(p, scsirate, TARG_SCSIRATE + tindex); -    aic_dev->cur.period = period; -    aic_dev->cur.offset = offset; -    aic_dev->cur.options = options; -    if ( !(type & AHC_TRANS_QUITE) && -         (aic7xxx_verbose & VERBOSE_NEGOTIATION) && -         (aic_dev->flags & DEVICE_PRINT_DTR) ) -    { -      if (offset) -      { -        int rate_mod = (scsirate & WIDEXFER) ? 
1 : 0; -       -        printk(INFO_LEAD "Synchronous at %s Mbyte/sec, " -               "offset %d.\n", p->host_no, channel, target, lun, -               syncrate->rate[rate_mod], offset); -      } -      else -      { -        printk(INFO_LEAD "Using asynchronous transfers.\n", -               p->host_no, channel, target, lun); -      } -      aic_dev->flags &= ~DEVICE_PRINT_DTR; -    } -  } - -  if (type & AHC_TRANS_GOAL) -  { -    aic_dev->goal.period = period; -    aic_dev->goal.offset = offset; -    aic_dev->goal.options = options; -  } - -  if (type & AHC_TRANS_USER) -  { -    p->user[tindex].period = period; -    p->user[tindex].offset = offset; -    p->user[tindex].options = options; -  } -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_set_width - * - * Description: - *   Set the actual width down in the card and in our host structs - *-F*************************************************************************/ -static void -aic7xxx_set_width(struct aic7xxx_host *p, int target, int channel, int lun, -    unsigned int width, unsigned int type, struct aic_dev_data *aic_dev) -{ -  unsigned char tindex; -  unsigned short target_mask; -  unsigned int old_width; - -  tindex = target | (channel << 3); -  target_mask = 1 << tindex; -   -  old_width = aic_dev->cur.width; - -  if (type & AHC_TRANS_CUR)  -  { -    unsigned char scsirate; - -    scsirate = aic_inb(p, TARG_SCSIRATE + tindex); - -    scsirate &= ~WIDEXFER; -    if (width == MSG_EXT_WDTR_BUS_16_BIT) -      scsirate |= WIDEXFER; - -    aic_outb(p, scsirate, TARG_SCSIRATE + tindex); - -    if (type & AHC_TRANS_ACTIVE) -      aic_outb(p, scsirate, SCSIRATE); - -    aic_dev->cur.width = width; - -    if ( !(type & AHC_TRANS_QUITE) && -          (aic7xxx_verbose & VERBOSE_NEGOTIATION2) &&  -          (aic_dev->flags & DEVICE_PRINT_DTR) ) -    { -      printk(INFO_LEAD "Using %s transfers\n", p->host_no, channel, target, -        lun, (scsirate & WIDEXFER) ? "Wide(16bit)" : "Narrow(8bit)" ); -    } -  } - -  if (type & AHC_TRANS_GOAL) -    aic_dev->goal.width = width; -  if (type & AHC_TRANS_USER) -    p->user[tindex].width = width; - -  if (aic_dev->goal.offset) -  { -    if (p->features & AHC_ULTRA2) -    { -      aic_dev->goal.offset = MAX_OFFSET_ULTRA2; -    } -    else if (width == MSG_EXT_WDTR_BUS_16_BIT) -    { -      aic_dev->goal.offset = MAX_OFFSET_16BIT; -    } -    else -    { -      aic_dev->goal.offset = MAX_OFFSET_8BIT; -    } -  } -} -       -/*+F************************************************************************* - * Function: - *   scbq_init - * - * Description: - *   SCB queue initialization. - * - *-F*************************************************************************/ -static void -scbq_init(volatile scb_queue_type *queue) -{ -  queue->head = NULL; -  queue->tail = NULL; -} - -/*+F************************************************************************* - * Function: - *   scbq_insert_head - * - * Description: - *   Add an SCB to the head of the list. - * - *-F*************************************************************************/ -static inline void -scbq_insert_head(volatile scb_queue_type *queue, struct aic7xxx_scb *scb) -{ -  scb->q_next = queue->head; -  queue->head = scb; -  if (queue->tail == NULL)       /* If list was empty, update tail. 
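The scbq_* helpers are a plain head/tail singly linked queue. A minimal standalone version showing both invariant updates, with a generic node in place of struct aic7xxx_scb:

#include <stddef.h>

struct node  { struct node *next; };
struct queue { struct node *head, *tail; };

static void q_insert_head(struct queue *q, struct node *n)
{
        n->next = q->head;
        q->head = n;
        if (q->tail == NULL)            /* first element: tail follows */
                q->tail = n;
}

static void q_insert_tail(struct queue *q, struct node *n)
{
        n->next = NULL;
        if (q->tail != NULL)
                q->tail->next = n;
        q->tail = n;
        if (q->head == NULL)            /* first element: head follows */
                q->head = n;
}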
*/ -    queue->tail = queue->head; -} - -/*+F************************************************************************* - * Function: - *   scbq_remove_head - * - * Description: - *   Remove an SCB from the head of the list. - * - *-F*************************************************************************/ -static inline struct aic7xxx_scb * -scbq_remove_head(volatile scb_queue_type *queue) -{ -  struct aic7xxx_scb * scbp; - -  scbp = queue->head; -  if (queue->head != NULL) -    queue->head = queue->head->q_next; -  if (queue->head == NULL)       /* If list is now empty, update tail. */ -    queue->tail = NULL; -  return(scbp); -} - -/*+F************************************************************************* - * Function: - *   scbq_remove - * - * Description: - *   Removes an SCB from the list. - * - *-F*************************************************************************/ -static inline void -scbq_remove(volatile scb_queue_type *queue, struct aic7xxx_scb *scb) -{ -  if (queue->head == scb) -  { -    /* At beginning of queue, remove from head. */ -    scbq_remove_head(queue); -  } -  else -  { -    struct aic7xxx_scb *curscb = queue->head; - -    /* -     * Search until the next scb is the one we're looking for, or -     * we run out of queue. -     */ -    while ((curscb != NULL) && (curscb->q_next != scb)) -    { -      curscb = curscb->q_next; -    } -    if (curscb != NULL) -    { -      /* Found it. */ -      curscb->q_next = scb->q_next; -      if (scb->q_next == NULL) -      { -        /* Update the tail when removing the tail. */ -        queue->tail = curscb; -      } -    } -  } -} - -/*+F************************************************************************* - * Function: - *   scbq_insert_tail - * - * Description: - *   Add an SCB at the tail of the list. - * - *-F*************************************************************************/ -static inline void -scbq_insert_tail(volatile scb_queue_type *queue, struct aic7xxx_scb *scb) -{ -  scb->q_next = NULL; -  if (queue->tail != NULL)       /* Add the scb at the end of the list. */ -    queue->tail->q_next = scb; -  queue->tail = scb;             /* Update the tail. */ -  if (queue->head == NULL)       /* If list was empty, update head. */ -    queue->head = queue->tail; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_match_scb - * - * Description: - *   Checks to see if an scb matches the target/channel as specified. - *   If target is ALL_TARGETS (-1), then we're looking for any device - *   on the specified channel; this happens when a channel is going - *   to be reset and all devices on that channel must be aborted. 
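The match is a cascade of equal-or-wildcard comparisons. A standalone sketch, with a single illustrative wildcard in place of ALL_TARGETS, ALL_CHANNELS, ALL_LUNS, and SCB_LIST_NULL:

#define ANY (-1)                        /* illustrative wildcard */

/* An SCB matches when every field is equal or wildcarded, the same
 * cascade aic7xxx_match_scb() performs. */
static int scb_matches(int s_target, int s_channel, int s_lun, int s_tag,
                       int target, int channel, int lun, int tag)
{
        return (channel == ANY || s_channel == channel) &&
               (target == ANY || s_target == target) &&
               (lun == ANY || s_lun == lun) &&
               (tag == ANY || s_tag == tag);
}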
- *-F*************************************************************************/ -static int -aic7xxx_match_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb, -    int target, int channel, int lun, unsigned char tag) -{ -  int targ = (scb->hscb->target_channel_lun >> 4) & 0x0F; -  int chan = (scb->hscb->target_channel_lun >> 3) & 0x01; -  int slun = scb->hscb->target_channel_lun & 0x07; -  int match; - -  match = ((chan == channel) || (channel == ALL_CHANNELS)); -  if (match != 0) -    match = ((targ == target) || (target == ALL_TARGETS)); -  if (match != 0) -    match = ((lun == slun) || (lun == ALL_LUNS)); -  if (match != 0) -    match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL)); - -  return (match); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_add_curscb_to_free_list - * - * Description: - *   Adds the current scb (in SCBPTR) to the list of free SCBs. - *-F*************************************************************************/ -static void -aic7xxx_add_curscb_to_free_list(struct aic7xxx_host *p) -{ -  /* -   * Invalidate the tag so that aic7xxx_find_scb doesn't think -   * it's active -   */ -  aic_outb(p, SCB_LIST_NULL, SCB_TAG); -  aic_outb(p, 0, SCB_CONTROL); - -  aic_outb(p, aic_inb(p, FREE_SCBH), SCB_NEXT); -  aic_outb(p, aic_inb(p, SCBPTR), FREE_SCBH); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_rem_scb_from_disc_list - * - * Description: - *   Removes the current SCB from the disconnected list and adds it - *   to the free list. - *-F*************************************************************************/ -static unsigned char -aic7xxx_rem_scb_from_disc_list(struct aic7xxx_host *p, unsigned char scbptr, -                               unsigned char prev) -{ -  unsigned char next; - -  aic_outb(p, scbptr, SCBPTR); -  next = aic_inb(p, SCB_NEXT); -  aic7xxx_add_curscb_to_free_list(p); - -  if (prev != SCB_LIST_NULL) -  { -    aic_outb(p, prev, SCBPTR); -    aic_outb(p, next, SCB_NEXT); -  } -  else -  { -    aic_outb(p, next, DISCONNECTED_SCBH); -  } - -  return next; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_busy_target - * - * Description: - *   Set the specified target busy. - *-F*************************************************************************/ -static inline void -aic7xxx_busy_target(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ -  p->untagged_scbs[scb->hscb->target_channel_lun] = scb->hscb->tag; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_index_busy_target - * - * Description: - *   Returns the index of the busy target, and optionally sets the - *   target inactive. - *-F*************************************************************************/ -static inline unsigned char -aic7xxx_index_busy_target(struct aic7xxx_host *p, unsigned char tcl, -    int unbusy) -{ -  unsigned char busy_scbid; - -  busy_scbid = p->untagged_scbs[tcl]; -  if (unbusy) -  { -    p->untagged_scbs[tcl] = SCB_LIST_NULL; -  } -  return (busy_scbid); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_find_scb - * - * Description: - *   Look through the SCB array of the card and attempt to find the - *   hardware SCB that corresponds to the passed in SCB.  Return - *   SCB_LIST_NULL if unsuccessful.  
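Because the card exposes only one hardware SCB at a time through a selection register, the scan has to save and restore that selection around the loop. A sketch of the pattern, with illustrative accessors in place of the SCBPTR reads and writes:

/* Scan banked slots for a tag, restoring the original selection no
 * matter what; 'null_tag' is returned when nothing matches. */
static int find_hw_slot(int nslots, int wanted_tag, int null_tag,
                        int (*get_ptr)(void), void (*set_ptr)(int),
                        int (*read_tag)(void))
{
        int saved = get_ptr();
        int slot;

        for (slot = 0; slot < nslots; slot++) {
                set_ptr(slot);
                if (read_tag() == wanted_tag)
                        break;
        }
        set_ptr(saved);                 /* always restore the window */
        return slot < nslots ? slot : null_tag;
}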
This routine assumes that the - *   card is already paused. - *-F*************************************************************************/ -static unsigned char -aic7xxx_find_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ -  unsigned char saved_scbptr; -  unsigned char curindex; - -  saved_scbptr = aic_inb(p, SCBPTR); -  curindex = 0; -  for (curindex = 0; curindex < p->scb_data->maxhscbs; curindex++) -  { -    aic_outb(p, curindex, SCBPTR); -    if (aic_inb(p, SCB_TAG) == scb->hscb->tag) -    { -      break; -    } -  } -  aic_outb(p, saved_scbptr, SCBPTR); -  if (curindex >= p->scb_data->maxhscbs) -  { -    curindex = SCB_LIST_NULL; -  } - -  return (curindex); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_allocate_scb - * - * Description: - *   Get an SCB from the free list or by allocating a new one. - *-F*************************************************************************/ -static int -aic7xxx_allocate_scb(struct aic7xxx_host *p) -{ -  struct aic7xxx_scb   *scbp = NULL; -  int scb_size = (sizeof (struct hw_scatterlist) * AIC7XXX_MAX_SG) + 12 + 6; -  int i; -  int step = PAGE_SIZE / 1024; -  unsigned long scb_count = 0; -  struct hw_scatterlist *hsgp; -  struct aic7xxx_scb *scb_ap; -  struct aic7xxx_scb_dma *scb_dma; -  unsigned char *bufs; - -  if (p->scb_data->numscbs < p->scb_data->maxscbs) -  { -    /* -     * Calculate the optimal number of SCBs to allocate. -     * -     * NOTE: This formula works because the sizeof(sg_array) is always -     * 1024.  Therefore, scb_size * i would always be > PAGE_SIZE * -     * (i/step).  The (i-1) allows the left hand side of the equation -     * to grow into the right hand side to a point of near perfect -     * efficiency since scb_size * (i -1) is growing slightly faster -     * than the right hand side.  If the number of SG array elements -     * is changed, this function may not be near so efficient any more. -     * -     * Since the DMA'able buffers are now allocated in a separate -     * chunk this algorithm has been modified to match.  The '12' -     * and '6' factors in scb_size are for the DMA'able command byte -     * and sensebuffers respectively.  
-DaveM -     */ -    for ( i=step;; i *= 2 ) -    { -      if ( (scb_size * (i-1)) >= ( (PAGE_SIZE * (i/step)) - 64 ) ) -      { -        i /= 2; -        break; -      } -    } -    scb_count = min( (i-1), p->scb_data->maxscbs - p->scb_data->numscbs); -    scb_ap = kmalloc(sizeof (struct aic7xxx_scb) * scb_count -					   + sizeof(struct aic7xxx_scb_dma), GFP_ATOMIC); -    if (scb_ap == NULL) -      return(0); -    scb_dma = (struct aic7xxx_scb_dma *)&scb_ap[scb_count]; -    hsgp = (struct hw_scatterlist *) -      pci_alloc_consistent(p->pdev, scb_size * scb_count, -			   &scb_dma->dma_address); -    if (hsgp == NULL) -    { -      kfree(scb_ap); -      return(0); -    } -    bufs = (unsigned char *)&hsgp[scb_count * AIC7XXX_MAX_SG]; -#ifdef AIC7XXX_VERBOSE_DEBUGGING -    if (aic7xxx_verbose > 0xffff) -    { -      if (p->scb_data->numscbs == 0) -	printk(INFO_LEAD "Allocating initial %ld SCB structures.\n", -	  p->host_no, -1, -1, -1, scb_count); -      else -	printk(INFO_LEAD "Allocating %ld additional SCB structures.\n", -	  p->host_no, -1, -1, -1, scb_count); -    } -#endif -    memset(scb_ap, 0, sizeof (struct aic7xxx_scb) * scb_count); -    scb_dma->dma_offset = (unsigned long)scb_dma->dma_address -			  - (unsigned long)hsgp; -    scb_dma->dma_len = scb_size * scb_count; -    for (i=0; i < scb_count; i++) -    { -      scbp = &scb_ap[i]; -      scbp->hscb = &p->scb_data->hscbs[p->scb_data->numscbs]; -      scbp->sg_list = &hsgp[i * AIC7XXX_MAX_SG]; -      scbp->sense_cmd = bufs; -      scbp->cmnd = bufs + 6; -      bufs += 12 + 6; -      scbp->scb_dma = scb_dma; -      memset(scbp->hscb, 0, sizeof(struct aic7xxx_hwscb)); -      scbp->hscb->tag = p->scb_data->numscbs; -      /* -       * Place in the scb array; never is removed -       */ -      p->scb_data->scb_array[p->scb_data->numscbs++] = scbp; -      scbq_insert_tail(&p->scb_data->free_scbs, scbp); -    } -    scbp->kmalloc_ptr = scb_ap; -  } -  return(scb_count); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_queue_cmd_complete - * - * Description: - *   Due to race conditions present in the SCSI subsystem, it is easier - *   to queue completed commands, then call scsi_done() on them when - *   we're finished.  This function queues the completed commands. - *-F*************************************************************************/ -static void -aic7xxx_queue_cmd_complete(struct aic7xxx_host *p, struct scsi_cmnd *cmd) -{ -  aic7xxx_position(cmd) = SCB_LIST_NULL; -  cmd->host_scribble = (char *)p->completeq.head; -  p->completeq.head = cmd; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_done_cmds_complete - * - * Description: - *   Process the completed command queue. - *-F*************************************************************************/ -static void aic7xxx_done_cmds_complete(struct aic7xxx_host *p) -{ -	struct scsi_cmnd *cmd; - -	while (p->completeq.head != NULL) { -		cmd = p->completeq.head; -		p->completeq.head = (struct scsi_cmnd *) cmd->host_scribble; -		cmd->host_scribble = NULL; -		cmd->scsi_done(cmd); -	} -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_free_scb - * - * Description: - *   Free the scb and insert into the free scb list. 
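The completion queue is a LIFO chain threaded through a spare per-command pointer, exactly the role host_scribble plays above. A standalone sketch of the push/drain pair with a stand-in command struct:

#include <stddef.h>

struct cmd {
        struct cmd *chain;              /* plays the host_scribble role */
        void (*done)(struct cmd *);
};

struct completeq { struct cmd *head; };

static void queue_complete(struct completeq *q, struct cmd *c)
{
        c->chain = q->head;             /* LIFO push; cheap, unordered */
        q->head = c;
}

static void drain_complete(struct completeq *q)
{
        while (q->head != NULL) {
                struct cmd *c = q->head;

                q->head = c->chain;
                c->chain = NULL;
                c->done(c);             /* the deferred scsi_done() */
        }
}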
- *-F*************************************************************************/ -static void -aic7xxx_free_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ - -  scb->flags = SCB_FREE; -  scb->cmd = NULL; -  scb->sg_count = 0; -  scb->sg_length = 0; -  scb->tag_action = 0; -  scb->hscb->control = 0; -  scb->hscb->target_status = 0; -  scb->hscb->target_channel_lun = SCB_LIST_NULL; - -  scbq_insert_head(&p->scb_data->free_scbs, scb); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_done - * - * Description: - *   Calls the higher level scsi done function and frees the scb. - *-F*************************************************************************/ -static void -aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ -	struct scsi_cmnd *cmd = scb->cmd; -	struct aic_dev_data *aic_dev = cmd->device->hostdata; -	int tindex = TARGET_INDEX(cmd); -	struct aic7xxx_scb *scbp; -	unsigned char queue_depth; - -        scsi_dma_unmap(cmd); - -  if (scb->flags & SCB_SENSE) -  { -    pci_unmap_single(p->pdev, -                     le32_to_cpu(scb->sg_list[0].address), -                     SCSI_SENSE_BUFFERSIZE, -                     PCI_DMA_FROMDEVICE); -  } -  if (scb->flags & SCB_RECOVERY_SCB) -  { -    p->flags &= ~AHC_ABORT_PENDING; -  } -  if (scb->flags & (SCB_RESET|SCB_ABORT)) -  { -    cmd->result |= (DID_RESET << 16); -  } - -  if ((scb->flags & SCB_MSGOUT_BITS) != 0) -  { -    unsigned short mask; -    int message_error = FALSE; - -    mask = 0x01 << tindex; -  -    /* -     * Check to see if we get an invalid message or a message error -     * after failing to negotiate a wide or sync transfer message. -     */ -    if ((scb->flags & SCB_SENSE) &&  -          ((scb->cmd->sense_buffer[12] == 0x43) ||  /* INVALID_MESSAGE */ -          (scb->cmd->sense_buffer[12] == 0x49))) /* MESSAGE_ERROR  */ -    { -      message_error = TRUE; -    } - -    if (scb->flags & SCB_MSGOUT_WDTR) -    { -      if (message_error) -      { -        if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && -             (aic_dev->flags & DEVICE_PRINT_DTR) ) -        { -          printk(INFO_LEAD "Device failed to complete Wide Negotiation " -            "processing and\n", p->host_no, CTL_OF_SCB(scb)); -          printk(INFO_LEAD "returned a sense error code for invalid message, " -            "disabling future\n", p->host_no, CTL_OF_SCB(scb)); -          printk(INFO_LEAD "Wide negotiation to this device.\n", p->host_no, -            CTL_OF_SCB(scb)); -        } -        aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; -      } -    } -    if (scb->flags & SCB_MSGOUT_SDTR) -    { -      if (message_error) -      { -        if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && -             (aic_dev->flags & DEVICE_PRINT_DTR) ) -        { -          printk(INFO_LEAD "Device failed to complete Sync Negotiation " -            "processing and\n", p->host_no, CTL_OF_SCB(scb)); -          printk(INFO_LEAD "returned a sense error code for invalid message, " -            "disabling future\n", p->host_no, CTL_OF_SCB(scb)); -          printk(INFO_LEAD "Sync negotiation to this device.\n", p->host_no, -            CTL_OF_SCB(scb)); -          aic_dev->flags &= ~DEVICE_PRINT_DTR; -        } -        aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; -      } -    } -    if (scb->flags & SCB_MSGOUT_PPR) -    { -      if(message_error) -      { -        if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && -             (aic_dev->flags & DEVICE_PRINT_DTR) ) -        { -      
    printk(INFO_LEAD "Device failed to complete Parallel Protocol " -            "Request processing and\n", p->host_no, CTL_OF_SCB(scb)); -          printk(INFO_LEAD "returned a sense error code for invalid message, " -            "disabling future\n", p->host_no, CTL_OF_SCB(scb)); -          printk(INFO_LEAD "Parallel Protocol Request negotiation to this " -            "device.\n", p->host_no, CTL_OF_SCB(scb)); -        } -        /* -         * Disable PPR negotiation and revert back to WDTR and SDTR setup -         */ -        aic_dev->needppr = aic_dev->needppr_copy = 0; -        aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; -        aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; -      } -    } -  } - -  queue_depth = aic_dev->temp_q_depth; -  if (queue_depth >= aic_dev->active_cmds) -  { -    scbp = scbq_remove_head(&aic_dev->delayed_scbs); -    if (scbp) -    { -      if (queue_depth == 1) -      { -        /* -         * Give extra preference to untagged devices, such as CD-R devices -         * This makes it more likely that a drive *won't* stuff up while -         * waiting on data at a critical time, such as CD-R writing and -         * audio CD ripping operations.  Should also benefit tape drives. -         */ -        scbq_insert_head(&p->waiting_scbs, scbp); -      } -      else -      { -        scbq_insert_tail(&p->waiting_scbs, scbp); -      } -#ifdef AIC7XXX_VERBOSE_DEBUGGING -      if (aic7xxx_verbose > 0xffff) -        printk(INFO_LEAD "Moving SCB from delayed to waiting queue.\n", -               p->host_no, CTL_OF_SCB(scbp)); -#endif -      if (queue_depth > aic_dev->active_cmds) -      { -        scbp = scbq_remove_head(&aic_dev->delayed_scbs); -        if (scbp) -          scbq_insert_tail(&p->waiting_scbs, scbp); -      } -    } -  } -  if (!(scb->tag_action)) -  { -    aic7xxx_index_busy_target(p, scb->hscb->target_channel_lun, -                              /* unbusy */ TRUE); -    if (cmd->device->simple_tags) -    { -      aic_dev->temp_q_depth = aic_dev->max_q_depth; -    } -  } -  if(scb->flags & SCB_DTR_SCB) -  { -    aic_dev->dtr_pending = 0; -  } -  aic_dev->active_cmds--; -  p->activescbs--; - -  if ((scb->sg_length >= 512) && (((cmd->result >> 16) & 0xf) == DID_OK)) -  { -    long *ptr; -    int x, i; - - -    if (rq_data_dir(cmd->request) == WRITE) -    { -      aic_dev->w_total++; -      ptr = aic_dev->w_bins; -    } -    else -    { -      aic_dev->r_total++; -      ptr = aic_dev->r_bins; -    } -    x = scb->sg_length; -    x >>= 10; -    for(i=0; i<6; i++) -    { -      x >>= 2; -      if(!x) { -        ptr[i]++; -	break; -      } -    } -    if(i == 6 && x) -      ptr[5]++; -  } -  aic7xxx_free_scb(p, scb); -  aic7xxx_queue_cmd_complete(p, cmd); - -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_run_done_queue - * - * Description: - *   Calls the aic7xxx_done() for the scsi_cmnd of each scb in the - *   aborted list, and adds each scb to the free list.  If complete - *   is TRUE, we also process the commands complete list. 
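The throughput statistics kept in aic7xxx_done() use simple logarithmic binning of the transfer length. A standalone sketch of the same arithmetic:

/* Shift the byte count down to 1K units, then walk it down two bits
 * at a time, so each bin covers transfers four times larger than the
 * previous one; oversized transfers land in the last bin. */
static void account_transfer(unsigned long bins[6], unsigned int bytes)
{
        unsigned int x = bytes >> 10;
        int i;

        for (i = 0; i < 6; i++) {
                x >>= 2;
                if (!x) {
                        bins[i]++;
                        return;
                }
        }
        bins[5]++;
}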
- *-F*************************************************************************/ -static void -aic7xxx_run_done_queue(struct aic7xxx_host *p, /*complete*/ int complete) -{ -  struct aic7xxx_scb *scb; -  int i, found = 0; - -  for (i = 0; i < p->scb_data->numscbs; i++) -  { -    scb = p->scb_data->scb_array[i]; -    if (scb->flags & SCB_QUEUED_FOR_DONE) -    { -      if (scb->flags & SCB_QUEUE_FULL) -      { -	scb->cmd->result = QUEUE_FULL << 1; -      } -      else -      { -        if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) -          printk(INFO_LEAD "Aborting scb %d\n", -               p->host_no, CTL_OF_SCB(scb), scb->hscb->tag); -        /* -         * Clear any residual information since the normal aic7xxx_done() path -         * doesn't touch the residuals. -         */ -        scb->hscb->residual_SG_segment_count = 0; -        scb->hscb->residual_data_count[0] = 0; -        scb->hscb->residual_data_count[1] = 0; -        scb->hscb->residual_data_count[2] = 0; -      } -      found++; -      aic7xxx_done(p, scb); -    } -  } -  if (aic7xxx_verbose & (VERBOSE_ABORT_RETURN | VERBOSE_RESET_RETURN)) -  { -    printk(INFO_LEAD "%d commands found and queued for " -        "completion.\n", p->host_no, -1, -1, -1, found); -  } -  if (complete) -  { -    aic7xxx_done_cmds_complete(p); -  } -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_abort_waiting_scb - * - * Description: - *   Manipulate the waiting for selection list and return the - *   scb that follows the one that we remove. - *-F*************************************************************************/ -static unsigned char -aic7xxx_abort_waiting_scb(struct aic7xxx_host *p, struct aic7xxx_scb *scb, -    unsigned char scbpos, unsigned char prev) -{ -  unsigned char curscb, next; - -  /* -   * Select the SCB we want to abort and pull the next pointer out of it. -   */ -  curscb = aic_inb(p, SCBPTR); -  aic_outb(p, scbpos, SCBPTR); -  next = aic_inb(p, SCB_NEXT); - -  aic7xxx_add_curscb_to_free_list(p); - -  /* -   * Update the waiting list -   */ -  if (prev == SCB_LIST_NULL) -  { -    /* -     * First in the list -     */ -    aic_outb(p, next, WAITING_SCBH); -  } -  else -  { -    /* -     * Select the scb that pointed to us and update its next pointer. -     */ -    aic_outb(p, prev, SCBPTR); -    aic_outb(p, next, SCB_NEXT); -  } -  /* -   * Point us back at the original scb position and inform the SCSI -   * system that the command has been aborted. -   */ -  aic_outb(p, curscb, SCBPTR); -  return (next); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_search_qinfifo - * - * Description: - *   Search the queue-in FIFO for matching SCBs and conditionally - *   requeue.  Returns the number of matching SCBs. - *-F*************************************************************************/ -static int -aic7xxx_search_qinfifo(struct aic7xxx_host *p, int target, int channel, -    int lun, unsigned char tag, int flags, int requeue, -    volatile scb_queue_type *queue) -{ -  int      found; -  unsigned char qinpos, qintail; -  struct aic7xxx_scb *scbp; - -  found = 0; -  qinpos = aic_inb(p, QINPOS); -  qintail = p->qinfifonext; - -  p->qinfifonext = qinpos; - -  while (qinpos != qintail) -  { -    scbp = p->scb_data->scb_array[p->qinfifo[qinpos++]]; -    if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag)) -    { -       /* -        * We found an scb that needs to be removed. 
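The search rewrites the FIFO in place: matching entries are dropped (or requeued elsewhere), everything else is copied back, and the freed tail is padded with null tags. A sketch assuming a 256-entry ring indexed by a naturally wrapping unsigned char:

/* Compact a 256-entry ring between 'pos' and 'tail', keeping only
 * entries the predicate rejects.  Returns how many were removed. */
static int compact_fifo(unsigned char fifo[256], unsigned char pos,
                        unsigned char tail, unsigned char null_tag,
                        int (*matches)(unsigned char tag))
{
        unsigned char next = pos;       /* new write position */
        int found = 0;

        while (pos != tail) {
                unsigned char tag = fifo[pos++];

                if (matches(tag))
                        found++;        /* dropped from the FIFO */
                else
                        fifo[next++] = tag;
        }
        while (next != tail)            /* pad the vacated slots */
                fifo[next++] = null_tag;
        return found;
}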
-        */
-       if (requeue && (queue != NULL))
-       {
-         if (scbp->flags & SCB_WAITINGQ)
-         {
-           scbq_remove(queue, scbp);
-           scbq_remove(&p->waiting_scbs, scbp);
-           scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp);
-           AIC_DEV(scbp->cmd)->active_cmds++;
-           p->activescbs++;
-         }
-         scbq_insert_tail(queue, scbp);
-         AIC_DEV(scbp->cmd)->active_cmds--;
-         p->activescbs--;
-         scbp->flags |= SCB_WAITINGQ;
-         if ( !(scbp->tag_action & TAG_ENB) )
-         {
-           aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
-             TRUE);
-         }
-       }
-       else if (requeue)
-       {
-         p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
-       }
-       else
-       {
-        /*
-         * Preserve any SCB_RECOVERY_SCB flags on this scb then set the
-         * flags we were called with, presumably so aic7xxx_run_done_queue
-         * can find this scb
-         */
-         scbp->flags = flags | (scbp->flags & SCB_RECOVERY_SCB);
-         if (aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
-                                       FALSE) == scbp->hscb->tag)
-         {
-           aic7xxx_index_busy_target(p, scbp->hscb->target_channel_lun,
-             TRUE);
-         }
-       }
-       found++;
-    }
-    else
-    {
-      p->qinfifo[p->qinfifonext++] = scbp->hscb->tag;
-    }
-  }
-  /*
-   * Now that we've done the work, clear out any leftover commands in the
-   * qinfifo and update the KERNEL_QINPOS down on the card.
-   *
-   *  NOTE: This routine expects the sequencer to already be paused when
-   *        it is run....make sure it's that way!
-   */
-  qinpos = p->qinfifonext;
-  while(qinpos != qintail)
-  {
-    p->qinfifo[qinpos++] = SCB_LIST_NULL;
-  }
-  if (p->features & AHC_QUEUE_REGS)
-    aic_outb(p, p->qinfifonext, HNSCB_QOFF);
-  else
-    aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
-
-  return (found);
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_scb_on_qoutfifo
- *
- * Description:
- *   Is the scb that was passed to us currently on the qoutfifo?
- *-F*************************************************************************/
-static int
-aic7xxx_scb_on_qoutfifo(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
-  int i=0;
-
-  while(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] != SCB_LIST_NULL)
-  {
-    if(p->qoutfifo[(p->qoutfifonext + i) & 0xff ] == scb->hscb->tag)
-      return TRUE;
-    else
-      i++;
-  }
-  return FALSE;
-}
-
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_reset_device
- *
- * Description:
- *   The device at the given target/channel has been reset.  Abort
- *   all active and queued scbs for that target/channel.  This function
- *   need not worry about linked next pointers because if it was a MSG_ABORT_TAG
- *   then we had a tagged command (no linked next), if it was MSG_ABORT or
- *   MSG_BUS_DEV_RESET then the device won't know about any commands any more
- *   and no busy commands will exist, and if it was a bus reset, then nothing
- *   knows about any linked next commands any more.  In all cases, we don't
- *   need to worry about the linked next or busy scb, we just need to clear
- *   them.
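Throughout this cleanup the driver addresses a device by a packed target/channel/lun byte, the "tcl" used to index the untagged-command table. A standalone sketch of the packing, matching the shifts used in the surrounding code:

/* Bits 7-4 target, bit 3 channel, bits 2-0 lun. */
static unsigned char make_tcl(int target, int channel, int lun)
{
        return (unsigned char)((target << 4) | (channel << 3) | (lun & 0x07));
}

static void split_tcl(unsigned char tcl, int *target, int *channel, int *lun)
{
        *target  = (tcl >> 4) & 0x0f;
        *channel = (tcl >> 3) & 0x01;
        *lun     = tcl & 0x07;
}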
- *-F*************************************************************************/ -static void -aic7xxx_reset_device(struct aic7xxx_host *p, int target, int channel, -                     int lun, unsigned char tag) -{ -  struct aic7xxx_scb *scbp, *prev_scbp; -  struct scsi_device *sd; -  unsigned char active_scb, tcl, scb_tag; -  int i = 0, init_lists = FALSE; -  struct aic_dev_data *aic_dev; - -  /* -   * Restore this when we're done -   */ -  active_scb = aic_inb(p, SCBPTR); -  scb_tag = aic_inb(p, SCB_TAG); - -  if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS)) -  { -    printk(INFO_LEAD "Reset device, hardware_scb %d,\n", -         p->host_no, channel, target, lun, active_scb); -    printk(INFO_LEAD "Current scb %d, SEQADDR 0x%x, LASTPHASE " -           "0x%x\n", -         p->host_no, channel, target, lun, scb_tag, -         aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), -         aic_inb(p, LASTPHASE)); -    printk(INFO_LEAD "SG_CACHEPTR 0x%x, SG_COUNT %d, SCSISIGI 0x%x\n", -         p->host_no, channel, target, lun, -         (p->features & AHC_ULTRA2) ?  aic_inb(p, SG_CACHEPTR) : 0, -         aic_inb(p, SG_COUNT), aic_inb(p, SCSISIGI)); -    printk(INFO_LEAD "SSTAT0 0x%x, SSTAT1 0x%x, SSTAT2 0x%x\n", -         p->host_no, channel, target, lun, aic_inb(p, SSTAT0), -         aic_inb(p, SSTAT1), aic_inb(p, SSTAT2)); -  } - -  /* -   * Deal with the busy target and linked next issues. -   */ -  list_for_each_entry(aic_dev, &p->aic_devs, list) -  { -    if (aic7xxx_verbose & (VERBOSE_RESET_PROCESS | VERBOSE_ABORT_PROCESS)) -      printk(INFO_LEAD "processing aic_dev %p\n", p->host_no, channel, target, -		    lun, aic_dev); -    sd = aic_dev->SDptr; - -    if((target != ALL_TARGETS && target != sd->id) || -       (channel != ALL_CHANNELS && channel != sd->channel)) -      continue; -    if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) -        printk(INFO_LEAD "Cleaning up status information " -          "and delayed_scbs.\n", p->host_no, sd->channel, sd->id, sd->lun); -    aic_dev->flags &= ~BUS_DEVICE_RESET_PENDING; -    if ( tag == SCB_LIST_NULL ) -    { -      aic_dev->dtr_pending = 0; -      aic_dev->needppr = aic_dev->needppr_copy; -      aic_dev->needsdtr = aic_dev->needsdtr_copy; -      aic_dev->needwdtr = aic_dev->needwdtr_copy; -      aic_dev->flags = DEVICE_PRINT_DTR; -      aic_dev->temp_q_depth = aic_dev->max_q_depth; -    } -    tcl = (sd->id << 4) | (sd->channel << 3) | sd->lun; -    if ( (aic7xxx_index_busy_target(p, tcl, FALSE) == tag) || -         (tag == SCB_LIST_NULL) ) -      aic7xxx_index_busy_target(p, tcl, /* unbusy */ TRUE); -    prev_scbp = NULL;  -    scbp = aic_dev->delayed_scbs.head; -    while (scbp != NULL) -    { -      prev_scbp = scbp; -      scbp = scbp->q_next; -      if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag)) -      { -        scbq_remove(&aic_dev->delayed_scbs, prev_scbp); -        if (prev_scbp->flags & SCB_WAITINGQ) -        { -          aic_dev->active_cmds++; -          p->activescbs++; -        } -        prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); -        prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; -      } -    } -  } - -  if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) -    printk(INFO_LEAD "Cleaning QINFIFO.\n", p->host_no, channel, target, lun ); -  aic7xxx_search_qinfifo(p, target, channel, lun, tag, -      SCB_RESET | SCB_QUEUED_FOR_DONE, /* requeue */ FALSE, NULL); - -/* - *  Search the waiting_scbs queue for matches, this catches 
any SCB_QUEUED - *  ABORT/RESET commands. - */ -  if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) -    printk(INFO_LEAD "Cleaning waiting_scbs.\n", p->host_no, channel, -      target, lun ); -  { -    struct aic7xxx_scb *scbp, *prev_scbp; - -    prev_scbp = NULL;  -    scbp = p->waiting_scbs.head; -    while (scbp != NULL) -    { -      prev_scbp = scbp; -      scbp = scbp->q_next; -      if (aic7xxx_match_scb(p, prev_scbp, target, channel, lun, tag)) -      { -        scbq_remove(&p->waiting_scbs, prev_scbp); -        if (prev_scbp->flags & SCB_WAITINGQ) -        { -          AIC_DEV(prev_scbp->cmd)->active_cmds++; -          p->activescbs++; -        } -        prev_scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); -        prev_scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; -      } -    } -  } - - -  /* -   * Search waiting for selection list. -   */ -  if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS)) -    printk(INFO_LEAD "Cleaning waiting for selection " -      "list.\n", p->host_no, channel, target, lun); -  { -    unsigned char next, prev, scb_index; - -    next = aic_inb(p, WAITING_SCBH);  /* Start at head of list. */ -    prev = SCB_LIST_NULL; -    while (next != SCB_LIST_NULL) -    { -      aic_outb(p, next, SCBPTR); -      scb_index = aic_inb(p, SCB_TAG); -      if (scb_index >= p->scb_data->numscbs) -      { -       /* -        * No aic7xxx_verbose check here.....we want to see this since it -        * means either the kernel driver or the sequencer screwed things up -        */ -        printk(WARN_LEAD "Waiting List inconsistency; SCB index=%d, " -          "numscbs=%d\n", p->host_no, channel, target, lun, scb_index, -          p->scb_data->numscbs); -        next = aic_inb(p, SCB_NEXT); -        aic7xxx_add_curscb_to_free_list(p); -      } -      else -      { -        scbp = p->scb_data->scb_array[scb_index]; -        if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag)) -        { -          next = aic7xxx_abort_waiting_scb(p, scbp, next, prev); -          if (scbp->flags & SCB_WAITINGQ) -          { -            AIC_DEV(scbp->cmd)->active_cmds++; -            p->activescbs++; -          } -          scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); -          scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; -          if (prev == SCB_LIST_NULL) -          { -            /* -             * This is either the first scb on the waiting list, or we -             * have already yanked the first and haven't left any behind. -             * Either way, we need to turn off the selection hardware if -             * it isn't already off. -             */ -            aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ); -            aic_outb(p, CLRSELTIMEO, CLRSINT1); -          } -        } -        else -        { -          prev = next; -          next = aic_inb(p, SCB_NEXT); -        } -      } -    } -  } - -  /* -   * Go through disconnected list and remove any entries we have queued -   * for completion, zeroing their control byte too. 
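Removing an entry from one of these register-resident singly linked lists is the classic prev/next patch-up. A sketch with a plain array standing in for the SCBPTR/SCB_NEXT register dance:

#define NULL_IDX 0xff                   /* stand-in for SCB_LIST_NULL */

struct hw_scb { int next; };            /* next slot index or NULL_IDX */

/* Unlink 'victim': patch the predecessor if there is one, otherwise
 * move the list head.  Returns the successor so the walk continues. */
static int unlink_at(struct hw_scb *scbs, int *head, int victim, int prev)
{
        int next = scbs[victim].next;

        if (prev != NULL_IDX)
                scbs[prev].next = next;
        else
                *head = next;           /* victim was the list head */
        return next;
}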
-   */
-  if (aic7xxx_verbose & (VERBOSE_ABORT_PROCESS | VERBOSE_RESET_PROCESS))
-    printk(INFO_LEAD "Cleaning disconnected scbs "
-      "list.\n", p->host_no, channel, target, lun);
-  if (p->flags & AHC_PAGESCBS)
-  {
-    unsigned char next, prev, scb_index;
-
-    next = aic_inb(p, DISCONNECTED_SCBH);
-    prev = SCB_LIST_NULL;
-    while (next != SCB_LIST_NULL)
-    {
-      aic_outb(p, next, SCBPTR);
-      scb_index = aic_inb(p, SCB_TAG);
-      if (scb_index > p->scb_data->numscbs)
-      {
-        printk(WARN_LEAD "Disconnected List inconsistency; SCB index=%d, "
-          "numscbs=%d\n", p->host_no, channel, target, lun, scb_index,
-          p->scb_data->numscbs);
-        next = aic7xxx_rem_scb_from_disc_list(p, next, prev);
-      }
-      else
-      {
-        scbp = p->scb_data->scb_array[scb_index];
-        if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
-        {
-          next = aic7xxx_rem_scb_from_disc_list(p, next, prev);
-          if (scbp->flags & SCB_WAITINGQ)
-          {
-            AIC_DEV(scbp->cmd)->active_cmds++;
-            p->activescbs++;
-          }
-          scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ);
-          scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE;
-          scbp->hscb->control = 0;
-        }
-        else
-        {
-          prev = next;
-          next = aic_inb(p, SCB_NEXT);
-        }
-      }
-    }
-  }
-
-  /*
-   * Walk the free list making sure no entries on the free list have
-   * a valid SCB_TAG value or SCB_CONTROL byte.
-   */
-  if (p->flags & AHC_PAGESCBS)
-  {
-    unsigned char next;
-
-    next = aic_inb(p, FREE_SCBH);
-    while (next != SCB_LIST_NULL)
-    {
-      aic_outb(p, next, SCBPTR);
-      if (aic_inb(p, SCB_TAG) < p->scb_data->numscbs)
-      {
-        printk(WARN_LEAD "Free list inconsistency!.\n", p->host_no, channel,
-          target, lun);
-        init_lists = TRUE;
-        next = SCB_LIST_NULL;
-      }
-      else
-      {
-        aic_outb(p, SCB_LIST_NULL, SCB_TAG);
-        aic_outb(p, 0, SCB_CONTROL);
-        next = aic_inb(p, SCB_NEXT);
-      }
-    }
-  }
-
-  /*
-   * Go through the hardware SCB array looking for commands that
-   * were active but not on any list.
-   */
-  if (init_lists)
-  {
-    aic_outb(p, SCB_LIST_NULL, FREE_SCBH);
-    aic_outb(p, SCB_LIST_NULL, WAITING_SCBH);
-    aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH);
-  }
-  for (i = p->scb_data->maxhscbs - 1; i >= 0; i--)
-  {
-    unsigned char scbid;
-
-    aic_outb(p, i, SCBPTR);
-    if (init_lists)
-    {
-      aic_outb(p, SCB_LIST_NULL, SCB_TAG);
-      aic_outb(p, SCB_LIST_NULL, SCB_NEXT);
-      aic_outb(p, 0, SCB_CONTROL);
-      aic7xxx_add_curscb_to_free_list(p);
-    }
-    else
-    {
-      scbid = aic_inb(p, SCB_TAG);
-      if (scbid < p->scb_data->numscbs)
-      {
-        scbp = p->scb_data->scb_array[scbid];
-        if (aic7xxx_match_scb(p, scbp, target, channel, lun, tag))
-        {
-          aic_outb(p, 0, SCB_CONTROL);
-          aic_outb(p, SCB_LIST_NULL, SCB_TAG);
-          aic7xxx_add_curscb_to_free_list(p);
-        }
-      }
-    }
-  }
-
-  /*
-   * Go through the entire SCB array now and look for commands
-   * for this target that are still active.  These are other (most likely
-   * tagged) commands that were disconnected when the reset occurred.
-   * Any command we find here, we know this about it: it wasn't on any queue,
-   * it wasn't in the qinfifo, it wasn't in the disconnected or waiting
-   * lists, so it really must have been a paged out SCB.
In that case, -   * we shouldn't need to bother with updating any counters, just mark -   * the correct flags and go on. -   */ -  for (i = 0; i < p->scb_data->numscbs; i++) -  { -    scbp = p->scb_data->scb_array[i]; -    if ((scbp->flags & SCB_ACTIVE) && -        aic7xxx_match_scb(p, scbp, target, channel, lun, tag) && -        !aic7xxx_scb_on_qoutfifo(p, scbp)) -    { -      if (scbp->flags & SCB_WAITINGQ) -      { -        scbq_remove(&p->waiting_scbs, scbp); -        scbq_remove(&AIC_DEV(scbp->cmd)->delayed_scbs, scbp); -        AIC_DEV(scbp->cmd)->active_cmds++; -        p->activescbs++; -      } -      scbp->flags |= SCB_RESET | SCB_QUEUED_FOR_DONE; -      scbp->flags &= ~(SCB_ACTIVE | SCB_WAITINGQ); -    } -  } - -  aic_outb(p, active_scb, SCBPTR); -} - - -/*+F************************************************************************* - * Function: - *   aic7xxx_clear_intstat - * - * Description: - *   Clears the interrupt status. - *-F*************************************************************************/ -static void -aic7xxx_clear_intstat(struct aic7xxx_host *p) -{ -  /* Clear any interrupt conditions this may have caused. */ -  aic_outb(p, CLRSELDO | CLRSELDI | CLRSELINGO, CLRSINT0); -  aic_outb(p, CLRSELTIMEO | CLRATNO | CLRSCSIRSTI | CLRBUSFREE | CLRSCSIPERR | -       CLRPHASECHG | CLRREQINIT, CLRSINT1); -  aic_outb(p, CLRSCSIINT | CLRSEQINT | CLRBRKADRINT | CLRPARERR, CLRINT); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_reset_current_bus - * - * Description: - *   Reset the current SCSI bus. - *-F*************************************************************************/ -static void -aic7xxx_reset_current_bus(struct aic7xxx_host *p) -{ - -  /* Disable reset interrupts. */ -  aic_outb(p, aic_inb(p, SIMODE1) & ~ENSCSIRST, SIMODE1); - -  /* Turn off the bus' current operations, after all, we shouldn't have any -   * valid commands left to cause a RSELI and SELO once we've tossed the -   * bus away with this reset, so we might as well shut down the sequencer -   * until the bus is restarted as opposed to saving the current settings -   * and restoring them (which makes no sense to me). */ - -  /* Turn on the bus reset. */ -  aic_outb(p, aic_inb(p, SCSISEQ) | SCSIRSTO, SCSISEQ); -  while ( (aic_inb(p, SCSISEQ) & SCSIRSTO) == 0) -    mdelay(5); - -  /* -   * Some of the new Ultra2 chipsets need a longer delay after a chip -   * reset than just the init setup creates, so we have to delay here -   * before we go into a reset in order to make the chips happy. -   */ -  if (p->features & AHC_ULTRA2) -    mdelay(250); -  else -    mdelay(50); - -  /* Turn off the bus reset. */ -  aic_outb(p, 0, SCSISEQ); -  mdelay(10); - -  aic7xxx_clear_intstat(p); -  /* Re-enable reset interrupts. */ -  aic_outb(p, aic_inb(p, SIMODE1) | ENSCSIRST, SIMODE1); - -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_reset_channel - * - * Description: - *   Reset the channel. - *-F*************************************************************************/ -static void -aic7xxx_reset_channel(struct aic7xxx_host *p, int channel, int initiate_reset) -{ -  unsigned long offset_min, offset_max; -  unsigned char sblkctl; -  int cur_channel; - -  if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -    printk(INFO_LEAD "Reset channel called, %s initiate reset.\n", -      p->host_no, channel, -1, -1, (initiate_reset==TRUE) ? 
"will" : "won't" ); - - -  if (channel == 1) -  { -    offset_min = 8; -    offset_max = 16; -  } -  else -  { -    if (p->features & AHC_TWIN) -    { -      /* Channel A */ -      offset_min = 0; -      offset_max = 8; -    } -    else -    { -      offset_min = 0; -      if (p->features & AHC_WIDE) -      { -        offset_max = 16; -      } -      else -      { -        offset_max = 8; -      } -    } -  } - -  while (offset_min < offset_max) -  { -    /* -     * Revert to async/narrow transfers until we renegotiate. -     */ -    aic_outb(p, 0, TARG_SCSIRATE + offset_min); -    if (p->features & AHC_ULTRA2) -    { -      aic_outb(p, 0, TARG_OFFSET + offset_min); -    } -    offset_min++; -  } - -  /* -   * Reset the bus and unpause/restart the controller -   */ -  sblkctl = aic_inb(p, SBLKCTL); -  if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) -    cur_channel = (sblkctl & SELBUSB) >> 3; -  else -    cur_channel = 0; -  if ( (cur_channel != channel) && (p->features & AHC_TWIN) ) -  { -    /* -     * Case 1: Command for another bus is active -     */ -    if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -      printk(INFO_LEAD "Stealthily resetting idle channel.\n", p->host_no, -        channel, -1, -1); -    /* -     * Stealthily reset the other bus without upsetting the current bus. -     */ -    aic_outb(p, sblkctl ^ SELBUSB, SBLKCTL); -    aic_outb(p, aic_inb(p, SIMODE1) & ~ENBUSFREE, SIMODE1); -    if (initiate_reset) -    { -      aic7xxx_reset_current_bus(p); -    } -    aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ); -    aic7xxx_clear_intstat(p); -    aic_outb(p, sblkctl, SBLKCTL); -  } -  else -  { -    /* -     * Case 2: A command from this bus is active or we're idle. -     */ -    if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -      printk(INFO_LEAD "Resetting currently active channel.\n", p->host_no, -        channel, -1, -1); -    aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT), -      SIMODE1); -    p->flags &= ~AHC_HANDLING_REQINITS; -    p->msg_type = MSG_TYPE_NONE; -    p->msg_len = 0; -    if (initiate_reset) -    { -      aic7xxx_reset_current_bus(p); -    } -    aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), SCSISEQ); -    aic7xxx_clear_intstat(p); -  } -  if (aic7xxx_verbose & VERBOSE_RESET_RETURN) -    printk(INFO_LEAD "Channel reset\n", p->host_no, channel, -1, -1); -  /* -   * Clean up all the state information for the pending transactions -   * on this bus. -   */ -  aic7xxx_reset_device(p, ALL_TARGETS, channel, ALL_LUNS, SCB_LIST_NULL); - -  if ( !(p->features & AHC_TWIN) ) -  { -    restart_sequencer(p); -  } - -  return; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_run_waiting_queues - * - * Description: - *   Scan the awaiting_scbs queue downloading and starting as many - *   scbs as we can. - *-F*************************************************************************/ -static void -aic7xxx_run_waiting_queues(struct aic7xxx_host *p) -{ -  struct aic7xxx_scb *scb; -  struct aic_dev_data *aic_dev; -  int sent; - - -  if (p->waiting_scbs.head == NULL) -    return; - -  sent = 0; - -  /* -   * First handle SCBs that are waiting but have been assigned a slot. 
-   */
-  while ((scb = scbq_remove_head(&p->waiting_scbs)) != NULL)
-  {
-    aic_dev = scb->cmd->device->hostdata;
-    if ( !scb->tag_action )
-    {
-      aic_dev->temp_q_depth = 1;
-    }
-    if ( aic_dev->active_cmds >= aic_dev->temp_q_depth)
-    {
-      scbq_insert_tail(&aic_dev->delayed_scbs, scb);
-    }
-    else
-    {
-        scb->flags &= ~SCB_WAITINGQ;
-        aic_dev->active_cmds++;
-        p->activescbs++;
-        if ( !(scb->tag_action) )
-        {
-          aic7xxx_busy_target(p, scb);
-        }
-        p->qinfifo[p->qinfifonext++] = scb->hscb->tag;
-        sent++;
-    }
-  }
-  if (sent)
-  {
-    if (p->features & AHC_QUEUE_REGS)
-      aic_outb(p, p->qinfifonext, HNSCB_QOFF);
-    else
-    {
-      pause_sequencer(p);
-      aic_outb(p, p->qinfifonext, KERNEL_QINPOS);
-      unpause_sequencer(p, FALSE);
-    }
-    if (p->activescbs > p->max_activescbs)
-      p->max_activescbs = p->activescbs;
-  }
-}
-
-#ifdef CONFIG_PCI
-
-#define  DPE 0x80
-#define  SSE 0x40
-#define  RMA 0x20
-#define  RTA 0x10
-#define  STA 0x08
-#define  DPR 0x01
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_pci_intr
- *
- * Description:
- *   Check the SCSI card for PCI errors and clear the interrupt.
- *
- *   NOTE: If you don't have this function and a 2940 card encounters
- *         a PCI error condition, the machine will end up locked as the
- *         interrupt handler gets slammed with non-stop PCI error interrupts.
- *-F*************************************************************************/
-static void
-aic7xxx_pci_intr(struct aic7xxx_host *p)
-{
-  unsigned char status1;
-
-  pci_read_config_byte(p->pdev, PCI_STATUS + 1, &status1);
-
-  if ( (status1 & DPE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
-    printk(WARN_LEAD "Data Parity Error during PCI address or PCI write "
-      "phase.\n", p->host_no, -1, -1, -1);
-  if ( (status1 & SSE) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
-    printk(WARN_LEAD "Signal System Error Detected\n", p->host_no,
-      -1, -1, -1);
-  if ( (status1 & RMA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
-    printk(WARN_LEAD "Received a PCI Master Abort\n", p->host_no,
-      -1, -1, -1);
-  if ( (status1 & RTA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
-    printk(WARN_LEAD "Received a PCI Target Abort\n", p->host_no,
-      -1, -1, -1);
-  if ( (status1 & STA) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
-    printk(WARN_LEAD "Signaled a PCI Target Abort\n", p->host_no,
-      -1, -1, -1);
-  if ( (status1 & DPR) && (aic7xxx_verbose & VERBOSE_MINOR_ERROR) )
-    printk(WARN_LEAD "Data Parity Error has been reported via PCI pin "
-      "PERR#\n", p->host_no, -1, -1, -1);
-
-  pci_write_config_byte(p->pdev, PCI_STATUS + 1, status1);
-  if (status1 & (DPR|RMA|RTA))
-    aic_outb(p,  CLRPARERR, CLRINT);
-
-  if ( (aic7xxx_panic_on_abort) && (p->spurious_int > 500) )
-    aic7xxx_panic_abort(p, NULL);
-
-}
-#endif /* CONFIG_PCI */
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_construct_ppr
- *
- * Description:
- *   Build up a Parallel Protocol Request message for use with SCSI-3
- *   devices.
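
A side note on the error handling above: the six bits live in the upper byte of the PCI_STATUS word, and writing the same byte back is what clears them (they are write-one-to-clear).  A standalone sketch of the decoding, reusing the driver's bit names but with illustrative messages:

  #include <stdio.h>

  #define DPE 0x80  /* detected parity error */
  #define SSE 0x40  /* signaled system error */
  #define RMA 0x20  /* received master abort */
  #define RTA 0x10  /* received target abort */
  #define STA 0x08  /* signaled target abort */
  #define DPR 0x01  /* data parity error reported via PERR# */

  static void decode_status1(unsigned char status1)
  {
      if (status1 & DPE) printf("data parity error during address/write phase\n");
      if (status1 & SSE) printf("signaled system error\n");
      if (status1 & RMA) printf("received a PCI master abort\n");
      if (status1 & RTA) printf("received a PCI target abort\n");
      if (status1 & STA) printf("signaled a PCI target abort\n");
      if (status1 & DPR) printf("data parity error reported via PERR#\n");
  }

  int main(void)
  {
      decode_status1(DPE | RMA);  /* example: parity error plus master abort */
      return 0;
  }
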
- *-F*************************************************************************/
-static void
-aic7xxx_construct_ppr(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
-  p->msg_buf[p->msg_index++] = MSG_EXTENDED;
-  p->msg_buf[p->msg_index++] = MSG_EXT_PPR_LEN;
-  p->msg_buf[p->msg_index++] = MSG_EXT_PPR;
-  p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.period;
-  p->msg_buf[p->msg_index++] = 0;
-  p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.offset;
-  p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.width;
-  p->msg_buf[p->msg_index++] = AIC_DEV(scb->cmd)->goal.options;
-  p->msg_len += 8;
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_construct_sdtr
- *
- * Description:
- *   Constructs a synchronous data transfer message in the message
- *   buffer on the sequencer.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_sdtr(struct aic7xxx_host *p, unsigned char period,
-        unsigned char offset)
-{
-  p->msg_buf[p->msg_index++] = MSG_EXTENDED;
-  p->msg_buf[p->msg_index++] = MSG_EXT_SDTR_LEN;
-  p->msg_buf[p->msg_index++] = MSG_EXT_SDTR;
-  p->msg_buf[p->msg_index++] = period;
-  p->msg_buf[p->msg_index++] = offset;
-  p->msg_len += 5;
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_construct_wdtr
- *
- * Description:
- *   Constructs a wide data transfer message in the message buffer
- *   on the sequencer.
- *-F*************************************************************************/
-static void
-aic7xxx_construct_wdtr(struct aic7xxx_host *p, unsigned char bus_width)
-{
-  p->msg_buf[p->msg_index++] = MSG_EXTENDED;
-  p->msg_buf[p->msg_index++] = MSG_EXT_WDTR_LEN;
-  p->msg_buf[p->msg_index++] = MSG_EXT_WDTR;
-  p->msg_buf[p->msg_index++] = bus_width;
-  p->msg_len += 4;
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_calculate_residual
- *
- * Description:
- *   Calculate the residual data not yet transferred.
- *-F*************************************************************************/
-static void
-aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
-  struct aic7xxx_hwscb *hscb;
-  struct scsi_cmnd *cmd;
-  int actual, i;
-
-  cmd = scb->cmd;
-  hscb = scb->hscb;
-
-  /*
-   *  Don't destroy valid residual information with
-   *  residual coming from a check sense operation.
-   */
-  if (((scb->hscb->control & DISCONNECTED) == 0) &&
-      (scb->flags & SCB_SENSE) == 0)
-  {
-    /*
-     *  We had an underflow. At this time, there's only
-     *  one other driver that bothers to check for this,
-     *  and cmd->underflow seems to be set rather half-
-     *  heartedly in the higher-level SCSI code.
-     */
-    actual = scb->sg_length;
-    for (i=1; i < hscb->residual_SG_segment_count; i++)
-    {
-      actual -= scb->sg_list[scb->sg_count - i].length;
-    }
-    actual -= (hscb->residual_data_count[2] << 16) |
-              (hscb->residual_data_count[1] <<  8) |
-              hscb->residual_data_count[0];
-
-    if (actual < cmd->underflow)
-    {
-      if (aic7xxx_verbose & VERBOSE_MINOR_ERROR)
-      {
-        printk(INFO_LEAD "Underflow - Wanted %u, %s %u, residual SG "
-          "count %d.\n", p->host_no, CTL_OF_SCB(scb), cmd->underflow,
-          (rq_data_dir(cmd->request) == WRITE) ?
"wrote" : "read", actual, -          hscb->residual_SG_segment_count); -        printk(INFO_LEAD "status 0x%x.\n", p->host_no, CTL_OF_SCB(scb), -          hscb->target_status); -      } -      /* -       * In 2.4, only send back the residual information, don't flag this -       * as an error.  Before 2.4 we had to flag this as an error because -       * the mid layer didn't check residual data counts to see if the -       * command needs retried. -       */ -      scsi_set_resid(cmd, scb->sg_length - actual); -      aic7xxx_status(cmd) = hscb->target_status; -    } -  } - -  /* -   * Clean out the residual information in the SCB for the -   * next consumer. -   */ -  hscb->residual_data_count[2] = 0; -  hscb->residual_data_count[1] = 0; -  hscb->residual_data_count[0] = 0; -  hscb->residual_SG_segment_count = 0; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_handle_device_reset - * - * Description: - *   Interrupt handler for sequencer interrupts (SEQINT). - *-F*************************************************************************/ -static void -aic7xxx_handle_device_reset(struct aic7xxx_host *p, int target, int channel) -{ -  unsigned char tindex = target; - -  tindex |= ((channel & 0x01) << 3); - -  /* -   * Go back to async/narrow transfers and renegotiate. -   */ -  aic_outb(p, 0, TARG_SCSIRATE + tindex); -  if (p->features & AHC_ULTRA2) -    aic_outb(p, 0, TARG_OFFSET + tindex); -  aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL); -  if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -    printk(INFO_LEAD "Bus Device Reset delivered.\n", p->host_no, channel, -      target, -1); -  aic7xxx_run_done_queue(p, /*complete*/ TRUE); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_handle_seqint - * - * Description: - *   Interrupt handler for sequencer interrupts (SEQINT). - *-F*************************************************************************/ -static void -aic7xxx_handle_seqint(struct aic7xxx_host *p, unsigned char intstat) -{ -  struct aic7xxx_scb *scb; -  struct aic_dev_data *aic_dev; -  unsigned short target_mask; -  unsigned char target, lun, tindex; -  unsigned char queue_flag = FALSE; -  char channel; -  int result; - -  target = ((aic_inb(p, SAVED_TCL) >> 4) & 0x0f); -  if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) -    channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3; -  else -    channel = 0; -  tindex = target + (channel << 3); -  lun = aic_inb(p, SAVED_TCL) & 0x07; -  target_mask = (0x01 << tindex); - -  /* -   * Go ahead and clear the SEQINT now, that avoids any interrupt race -   * conditions later on in case we enable some other interrupt. 
-   */ -  aic_outb(p, CLRSEQINT, CLRINT); -  switch (intstat & SEQINT_MASK) -  { -    case NO_MATCH: -      { -        aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP), -                 SCSISEQ); -        printk(WARN_LEAD "No active SCB for reconnecting target - Issuing " -               "BUS DEVICE RESET.\n", p->host_no, channel, target, lun); -        printk(WARN_LEAD "      SAVED_TCL=0x%x, ARG_1=0x%x, SEQADDR=0x%x\n", -               p->host_no, channel, target, lun, -               aic_inb(p, SAVED_TCL), aic_inb(p, ARG_1), -               (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0)); -        if (aic7xxx_panic_on_abort) -          aic7xxx_panic_abort(p, NULL); -      } -      break; - -    case SEND_REJECT: -      { -        if (aic7xxx_verbose & VERBOSE_MINOR_ERROR) -          printk(INFO_LEAD "Rejecting unknown message (0x%x) received from " -            "target, SEQ_FLAGS=0x%x\n", p->host_no, channel, target, lun, -            aic_inb(p, ACCUM), aic_inb(p, SEQ_FLAGS)); -      } -      break; - -    case NO_IDENT: -      { -        /* -         * The reconnecting target either did not send an identify -         * message, or did, but we didn't find an SCB to match and -         * before it could respond to our ATN/abort, it hit a dataphase. -         * The only safe thing to do is to blow it away with a bus -         * reset. -         */ -        if (aic7xxx_verbose & (VERBOSE_SEQINT | VERBOSE_RESET_MID)) -          printk(INFO_LEAD "Target did not send an IDENTIFY message; " -            "LASTPHASE 0x%x, SAVED_TCL 0x%x\n", p->host_no, channel, target, -            lun, aic_inb(p, LASTPHASE), aic_inb(p, SAVED_TCL)); - -        aic7xxx_reset_channel(p, channel, /*initiate reset*/ TRUE); -        aic7xxx_run_done_queue(p, TRUE); - -      } -      break; - -    case BAD_PHASE: -      if (aic_inb(p, LASTPHASE) == P_BUSFREE) -      { -        if (aic7xxx_verbose & VERBOSE_SEQINT) -          printk(INFO_LEAD "Missed busfree.\n", p->host_no, channel, -            target, lun); -        restart_sequencer(p); -      } -      else -      { -        if (aic7xxx_verbose & VERBOSE_SEQINT) -          printk(INFO_LEAD "Unknown scsi bus phase, continuing\n", p->host_no, -            channel, target, lun); -      } -      break; - -    case EXTENDED_MSG: -      { -        p->msg_type = MSG_TYPE_INITIATOR_MSGIN; -        p->msg_len = 0; -        p->msg_index = 0; - -#ifdef AIC7XXX_VERBOSE_DEBUGGING -        if (aic7xxx_verbose > 0xffff) -          printk(INFO_LEAD "Enabling REQINITs for MSG_IN\n", p->host_no, -                 channel, target, lun); -#endif - -       /*       -        * To actually receive the message, simply turn on -        * REQINIT interrupts and let our interrupt handler -        * do the rest (REQINIT should already be true). -        */ -        p->flags |= AHC_HANDLING_REQINITS; -        aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1); - -       /* -        * We don't want the sequencer unpaused yet so we return early -        */ -        return; -      } - -    case REJECT_MSG: -      { -        /* -         * What we care about here is if we had an outstanding SDTR -         * or WDTR message for this target. If we did, this is a -         * signal that the target is refusing negotiation. 
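
For context on what a target may be rejecting here: the negotiation messages built by the construct_*() helpers earlier are plain SCSI extended messages.  A minimal sketch of the SDTR wire format (byte values per the SPI spec; the helper name is made up):

  #include <stdio.h>

  #define MSG_EXTENDED     0x01
  #define MSG_EXT_SDTR     0x01
  #define MSG_EXT_SDTR_LEN 0x03

  static int build_sdtr(unsigned char *buf, unsigned char period,
                        unsigned char offset)
  {
      buf[0] = MSG_EXTENDED;
      buf[1] = MSG_EXT_SDTR_LEN;  /* bytes following the length byte */
      buf[2] = MSG_EXT_SDTR;
      buf[3] = period;            /* transfer period factor (12 = 50ns) */
      buf[4] = offset;            /* REQ/ACK offset, 0 = async */
      return 5;
  }

  int main(void)
  {
      unsigned char msg[8];
      int n = build_sdtr(msg, 12, 15), i;

      for (i = 0; i < n; i++)
          printf("%02x ", msg[i]);  /* 01 03 01 0c 0f */
      printf("\n");
      return 0;
  }
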
-         */
-        unsigned char scb_index;
-        unsigned char last_msg;
-
-        scb_index = aic_inb(p, SCB_TAG);
-        scb = p->scb_data->scb_array[scb_index];
-        aic_dev = AIC_DEV(scb->cmd);
-        last_msg = aic_inb(p, LAST_MSG);
-
-        if ( (last_msg == MSG_IDENTIFYFLAG) &&
-             (scb->tag_action) &&
-            !(scb->flags & SCB_MSGOUT_BITS) )
-        {
-          if (scb->tag_action == MSG_ORDERED_Q_TAG)
-          {
-            /*
-             * OK...the device seems able to accept tagged commands, but
-             * not ordered tag commands, only simple tag commands.  So, we
-             * disable ordered tag commands and go on with life just like
-             * normal.
-             */
-            scsi_adjust_queue_depth(scb->cmd->device, MSG_SIMPLE_TAG,
-                            scb->cmd->device->queue_depth);
-            scb->tag_action = MSG_SIMPLE_Q_TAG;
-            scb->hscb->control &= ~SCB_TAG_TYPE;
-            scb->hscb->control |= MSG_SIMPLE_Q_TAG;
-            aic_outb(p, scb->hscb->control, SCB_CONTROL);
-            /*
-             * OK...we set the tag type to simple tag command, now we re-assert
-             * ATNO and hope this will take us into the identify phase again
-             * so we can resend the tag type and info to the device.
-             */
-            aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
-            aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
-          }
-          else if (scb->tag_action == MSG_SIMPLE_Q_TAG)
-          {
-            unsigned char i;
-            struct aic7xxx_scb *scbp;
-            int old_verbose;
-            /*
-             * Hmmmm....the device is flaking out on tagged commands.
-             */
-            scsi_adjust_queue_depth(scb->cmd->device, 0 /* untagged */,
-                            p->host->cmd_per_lun);
-            aic_dev->max_q_depth = aic_dev->temp_q_depth = 1;
-            /*
-             * We set this command up as a bus device reset.  However, we have
-             * to clear the tag type as it's causing us problems.  We shouldn't
-             * have to worry about any other commands being active, since if
-             * the device is refusing tagged commands, this should be the
-             * first tagged command sent to the device; however, we do have
-             * to worry about any other tagged commands that may already be
-             * in the qinfifo.  The easiest way to do this is to issue a BDR,
-             * send all the commands back to the mid level code, then let them
-             * come back and get rebuilt as untagged commands.
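
The fallback ladder described above, reduced to a toy state machine (the type and field names are illustrative, not the driver's): ordered tags give way to simple tags, and a second rejection turns tagged queueing off with a queue depth of one.

  #include <stdio.h>

  enum tag_mode { TAG_ORDERED, TAG_SIMPLE, TAG_NONE };

  struct dev_state { enum tag_mode mode; int max_q_depth; };

  /* called when the target rejects the tag message we last sent */
  static void tag_reject(struct dev_state *d)
  {
      if (d->mode == TAG_ORDERED) {
          d->mode = TAG_SIMPLE;       /* retry with simple tags */
      } else if (d->mode == TAG_SIMPLE) {
          d->mode = TAG_NONE;         /* give up on tagged queueing */
          d->max_q_depth = 1;
      }
  }

  int main(void)
  {
      struct dev_state d = { TAG_ORDERED, 16 };

      tag_reject(&d);
      printf("mode=%d depth=%d\n", d.mode, d.max_q_depth);
      tag_reject(&d);
      printf("mode=%d depth=%d\n", d.mode, d.max_q_depth);
      return 0;
  }
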
-             */
-            scb->tag_action = 0;
-            scb->hscb->control &= ~(TAG_ENB | SCB_TAG_TYPE);
-            aic_outb(p,  scb->hscb->control, SCB_CONTROL);
-
-            old_verbose = aic7xxx_verbose;
-            aic7xxx_verbose &= ~(VERBOSE_RESET|VERBOSE_ABORT);
-            for (i=0; i < p->scb_data->numscbs; i++)
-            {
-              scbp = p->scb_data->scb_array[i];
-              if ((scbp->flags & SCB_ACTIVE) && (scbp != scb))
-              {
-                if (aic7xxx_match_scb(p, scbp, target, channel, lun, i))
-                {
-                  aic7xxx_reset_device(p, target, channel, lun, i);
-                }
-              }
-            }
-            aic7xxx_run_done_queue(p, TRUE);
-            aic7xxx_verbose = old_verbose;
-            /*
-             * Wait until after the for loop to set the busy index since
-             * aic7xxx_reset_device will clear the busy index during its
-             * operation.
-             */
-            aic7xxx_busy_target(p, scb);
-            printk(INFO_LEAD "Device is refusing tagged commands, using "
-              "untagged I/O.\n", p->host_no, channel, target, lun);
-            aic_outb(p, MSG_IDENTIFYFLAG, MSG_OUT);
-            aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO);
-          }
-        }
-        else if (scb->flags & SCB_MSGOUT_PPR)
-        {
-          /*
-           * As per the draft specs, any device capable of supporting any of
-           * the option values other than 0 is not allowed to reject the
-           * PPR message.  Instead, it must negotiate what it does support
-           * rather than rejecting our offering, or else cause a parity
-           * error during the msg_out phase to signal that it doesn't
-           * like our settings.
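
A sketch of that fallback policy in isolation (hypothetical flag variables, not the aic_dev fields): once PPR is off the table, wide and sync each have to be re-requested with the legacy WDTR/SDTR messages if the goals still call for them.

  #include <stdio.h>

  struct goals { int width; int offset; };

  static void ppr_rejected(const struct goals *g, int *needwdtr, int *needsdtr)
  {
      *needwdtr = g->width ? 1 : 0;   /* still want wide?  ask via WDTR */
      *needsdtr = g->offset ? 1 : 0;  /* still want sync?  ask via SDTR */
  }

  int main(void)
  {
      struct goals g = { 1, 15 };     /* wanted wide, offset 15 */
      int wdtr, sdtr;

      ppr_rejected(&g, &wdtr, &sdtr);
      printf("resend WDTR=%d SDTR=%d\n", wdtr, sdtr);
      return 0;
  }
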
-           */ -          aic_dev->needppr = aic_dev->needppr_copy = 0; -          aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT, -            (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE), aic_dev); -          aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0, -                               AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE, -			       aic_dev); -          aic_dev->goal.options = aic_dev->dtr_pending = 0; -          scb->flags &= ~SCB_MSGOUT_BITS; -          if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) -          { -            printk(INFO_LEAD "Device is rejecting PPR messages, falling " -              "back.\n", p->host_no, channel, target, lun); -          } -          if ( aic_dev->goal.width ) -          { -            aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; -            aic_dev->dtr_pending = 1; -            scb->flags |= SCB_MSGOUT_WDTR; -          } -          if ( aic_dev->goal.offset ) -          { -            aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; -            if( !aic_dev->dtr_pending ) -            { -              aic_dev->dtr_pending = 1; -              scb->flags |= SCB_MSGOUT_SDTR; -            } -          } -          if ( aic_dev->dtr_pending ) -          { -            aic_outb(p, HOST_MSG, MSG_OUT); -            aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); -          } -        } -        else if (scb->flags & SCB_MSGOUT_WDTR) -        { -          /* -           * note 8bit xfers and clear flag -           */ -          aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; -          scb->flags &= ~SCB_MSGOUT_BITS; -          aic7xxx_set_width(p, target, channel, lun, MSG_EXT_WDTR_BUS_8_BIT, -            (AHC_TRANS_ACTIVE|AHC_TRANS_GOAL|AHC_TRANS_CUR), aic_dev); -          aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0, -                               AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE, -			       aic_dev); -          if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) -          { -            printk(INFO_LEAD "Device is rejecting WDTR messages, using " -              "narrow transfers.\n", p->host_no, channel, target, lun); -          } -          aic_dev->needsdtr = aic_dev->needsdtr_copy; -        } -        else if (scb->flags & SCB_MSGOUT_SDTR) -        { -         /* -          * note asynch xfers and clear flag -          */ -          aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; -          scb->flags &= ~SCB_MSGOUT_BITS; -          aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0, -            (AHC_TRANS_CUR|AHC_TRANS_ACTIVE|AHC_TRANS_GOAL), aic_dev); -          if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) -          { -            printk(INFO_LEAD "Device is rejecting SDTR messages, using " -              "async transfers.\n", p->host_no, channel, target, lun); -          } -        } -        else if (aic7xxx_verbose & VERBOSE_SEQINT) -        { -          /* -           * Otherwise, we ignore it. -           */ -          printk(INFO_LEAD "Received MESSAGE_REJECT for unknown cause.  " -            "Ignoring.\n", p->host_no, channel, target, lun); -        } -      } -      break; - -    case BAD_STATUS: -      { -	unsigned char scb_index; -	struct aic7xxx_hwscb *hscb; -	struct scsi_cmnd *cmd; - -	/* The sequencer will notify us when a command has an error that -	 * would be of interest to the kernel.  This allows us to leave -	 * the sequencer running in the common case of command completes -	 * without error.  
The sequencer will have DMA'd the SCB back
-	 * up to us, so we can reference the driver's SCB array.
-	 *
-	 * Set the default return value to 0 indicating not to send
-	 * sense.  The sense code will change this if needed and this
-	 * reduces code duplication.
-	 */
-        aic_outb(p, 0, RETURN_1);
-        scb_index = aic_inb(p, SCB_TAG);
-        if (scb_index > p->scb_data->numscbs)
-        {
-          printk(WARN_LEAD "Invalid SCB during SEQINT 0x%02x, SCB_TAG %d.\n",
-            p->host_no, channel, target, lun, intstat, scb_index);
-          break;
-        }
-        scb = p->scb_data->scb_array[scb_index];
-        hscb = scb->hscb;
-
-        if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL))
-        {
-          printk(WARN_LEAD "Invalid SCB during SEQINT 0x%x, scb %d, flags 0x%x,"
-            " cmd 0x%lx.\n", p->host_no, channel, target, lun, intstat,
-            scb_index, scb->flags, (unsigned long) scb->cmd);
-        }
-        else
-        {
-          cmd = scb->cmd;
-	  aic_dev = AIC_DEV(scb->cmd);
-          hscb->target_status = aic_inb(p, SCB_TARGET_STATUS);
-          aic7xxx_status(cmd) = hscb->target_status;
-
-          cmd->result = hscb->target_status;
-
-          switch (status_byte(hscb->target_status))
-          {
-            case GOOD:
-              if (aic7xxx_verbose & VERBOSE_SEQINT)
-                printk(INFO_LEAD "Interrupted for status of GOOD???\n",
-                  p->host_no, CTL_OF_SCB(scb));
-              break;
-
-            case COMMAND_TERMINATED:
-            case CHECK_CONDITION:
-              if ( !(scb->flags & SCB_SENSE) )
-              {
-                /*
-                 * Send a sense command to the requesting target.
-                 * XXX - revisit this and get rid of the memcopys.
-                 */
-                memcpy(scb->sense_cmd, &generic_sense[0],
-                       sizeof(generic_sense));
-
-                scb->sense_cmd[1] = (cmd->device->lun << 5);
-                scb->sense_cmd[4] = SCSI_SENSE_BUFFERSIZE;
-
-                scb->sg_list[0].length =
-                  cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
-		scb->sg_list[0].address =
-                        cpu_to_le32(pci_map_single(p->pdev, cmd->sense_buffer,
-                                                   SCSI_SENSE_BUFFERSIZE,
-                                                   PCI_DMA_FROMDEVICE));
-
-                /*
-                 * XXX - We should allow disconnection, but can't as it
-                 * might allow overlapped tagged commands.
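
The sense_cmd patch-up above amounts to building a classic 6-byte REQUEST SENSE CDB.  A standalone sketch (SENSE_LEN stands in for SCSI_SENSE_BUFFERSIZE; the helper name is illustrative):

  #include <stdio.h>

  #define REQUEST_SENSE 0x03
  #define SENSE_LEN     96

  static void build_request_sense(unsigned char cdb[6], unsigned lun)
  {
      cdb[0] = REQUEST_SENSE;
      cdb[1] = (lun & 0x7) << 5;   /* pre-SCSI-3 LUN field in byte 1 */
      cdb[2] = 0;
      cdb[3] = 0;
      cdb[4] = SENSE_LEN;          /* allocation length we can accept */
      cdb[5] = 0;
  }

  int main(void)
  {
      unsigned char cdb[6];
      int i;

      build_request_sense(cdb, 2);
      for (i = 0; i < 6; i++)
          printf("%02x ", cdb[i]);  /* 03 40 00 00 60 00 */
      printf("\n");
      return 0;
  }
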
-                 */
-                /* hscb->control &= DISCENB; */
-                hscb->control = 0;
-                hscb->target_status = 0;
-                hscb->SG_list_pointer =
-		  cpu_to_le32(SCB_DMA_ADDR(scb, scb->sg_list));
-                hscb->SCSI_cmd_pointer =
-                  cpu_to_le32(SCB_DMA_ADDR(scb, scb->sense_cmd));
-                hscb->data_count = scb->sg_list[0].length;
-                hscb->data_pointer = scb->sg_list[0].address;
-                hscb->SCSI_cmd_length = COMMAND_SIZE(scb->sense_cmd[0]);
-                hscb->residual_SG_segment_count = 0;
-                hscb->residual_data_count[0] = 0;
-                hscb->residual_data_count[1] = 0;
-                hscb->residual_data_count[2] = 0;
-
-                scb->sg_count = hscb->SG_segment_count = 1;
-                scb->sg_length = SCSI_SENSE_BUFFERSIZE;
-                scb->tag_action = 0;
-                scb->flags |= SCB_SENSE;
-                /*
-                 * Ensure the target is busy since this will be
-                 * an untagged request.
-                 */
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
-                if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
-                {
-                  if (scb->flags & SCB_MSGOUT_BITS)
-                    printk(INFO_LEAD "Requesting SENSE with %s\n", p->host_no,
-                           CTL_OF_SCB(scb), (scb->flags & SCB_MSGOUT_SDTR) ?
-                           "SDTR" : "WDTR");
-                  else
-                    printk(INFO_LEAD "Requesting SENSE, no MSG\n", p->host_no,
-                           CTL_OF_SCB(scb));
-                }
-#endif
-                aic7xxx_busy_target(p, scb);
-                aic_outb(p, SEND_SENSE, RETURN_1);
-                aic7xxx_error(cmd) = DID_OK;
-                break;
-              }  /* first time sense, no errors */
-              printk(INFO_LEAD "CHECK_CONDITION on REQUEST_SENSE, returning "
-                     "an error.\n", p->host_no, CTL_OF_SCB(scb));
-              aic7xxx_error(cmd) = DID_ERROR;
-              scb->flags &= ~SCB_SENSE;
-              break;
-
-            case QUEUE_FULL:
-              queue_flag = TRUE;    /* Mark that this is a QUEUE_FULL and */
-            case BUSY:              /* drop through to here */
-            {
-              struct aic7xxx_scb *next_scbp, *prev_scbp;
-              unsigned char active_hscb, next_hscb, prev_hscb, scb_index;
-              /*
-               * We have to look in three places for queued commands:
-               *  1: p->waiting_scbs queue
-               *  2: QINFIFO
-               *  3: WAITING_SCBS list on card (for commands that are started
-               *     but haven't yet made it to the device)
-	       *
-	       * Of special note here is that commands on 2 or 3 above will
-	       * have already been marked as active, while commands on 1 will
-	       * not.  The aic7xxx_done() function will want to unmark them
-	       * from active, so any commands we pull off of 1 need to
-	       * up the active count.
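
A minimal model of the matching used while flushing those three queues; following the driver's wildcard convention (ALL_TARGETS and friends), a wildcard is represented here as -1.  The struct is a stand-in, not the real SCB layout.

  #include <stdio.h>

  struct scb { int target, channel, lun; struct scb *next; };

  static int match(const struct scb *s, int target, int channel, int lun)
  {
      return (target == -1 || s->target == target) &&
             (channel == -1 || s->channel == channel) &&
             (lun == -1 || s->lun == lun);
  }

  int main(void)
  {
      struct scb b = { 3, 0, 0, NULL }, a = { 2, 0, 0, &b };
      const struct scb *s;

      for (s = &a; s; s = s->next)   /* flush target 3, any lun */
          printf("scb for target %d: %s\n", s->target,
                 match(s, 3, 0, -1) ? "flush" : "keep");
      return 0;
  }
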
-               */
-              next_scbp = p->waiting_scbs.head;
-              while ( next_scbp != NULL )
-              {
-                prev_scbp = next_scbp;
-                next_scbp = next_scbp->q_next;
-                if ( aic7xxx_match_scb(p, prev_scbp, target, channel, lun,
-                     SCB_LIST_NULL) )
-                {
-                  scbq_remove(&p->waiting_scbs, prev_scbp);
-		  prev_scbp->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL;
-		  p->activescbs++;
-		  aic_dev->active_cmds++;
-                }
-              }
-              aic7xxx_search_qinfifo(p, target, channel, lun,
-                SCB_LIST_NULL, SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL,
-	       	FALSE, NULL);
-              next_scbp = NULL;
-              active_hscb = aic_inb(p, SCBPTR);
-              prev_hscb = next_hscb = scb_index = SCB_LIST_NULL;
-              next_hscb = aic_inb(p, WAITING_SCBH);
-              while (next_hscb != SCB_LIST_NULL)
-              {
-                aic_outb(p, next_hscb, SCBPTR);
-                scb_index = aic_inb(p, SCB_TAG);
-                if (scb_index < p->scb_data->numscbs)
-                {
-                  next_scbp = p->scb_data->scb_array[scb_index];
-                  if (aic7xxx_match_scb(p, next_scbp, target, channel, lun,
-                      SCB_LIST_NULL) )
-                  {
-		    next_scbp->flags = SCB_QUEUED_FOR_DONE | SCB_QUEUE_FULL;
-                    next_hscb = aic_inb(p, SCB_NEXT);
-                    aic_outb(p, 0, SCB_CONTROL);
-                    aic_outb(p, SCB_LIST_NULL, SCB_TAG);
-                    aic7xxx_add_curscb_to_free_list(p);
-                    if (prev_hscb == SCB_LIST_NULL)
-                    {
-                      /* We were first on the list,
-                       * so we kill the selection
-                       * hardware.
Let the sequencer
-                       * re-init the hardware itself.
-                       */
-                      aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ);
-                      aic_outb(p, CLRSELTIMEO, CLRSINT1);
-                      aic_outb(p, next_hscb, WAITING_SCBH);
-                    }
-                    else
-                    {
-                      aic_outb(p, prev_hscb, SCBPTR);
-                      aic_outb(p, next_hscb, SCB_NEXT);
-                    }
-                  }
-                  else
-                  {
-                    prev_hscb = next_hscb;
-                    next_hscb = aic_inb(p, SCB_NEXT);
-                  }
-                } /* scb_index >= p->scb_data->numscbs */
-              }
-              aic_outb(p, active_hscb, SCBPTR);
-	      aic7xxx_run_done_queue(p, FALSE);
-
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
-              if( (aic7xxx_verbose & VERBOSE_MINOR_ERROR) ||
-                  (aic7xxx_verbose > 0xffff) )
-              {
-                if (queue_flag)
-                  printk(INFO_LEAD "Queue full received; queue depth %d, "
-                    "active %d\n", p->host_no, CTL_OF_SCB(scb),
-                    aic_dev->max_q_depth, aic_dev->active_cmds);
-                else
-                  printk(INFO_LEAD "Target busy\n", p->host_no, CTL_OF_SCB(scb));
-              }
-#endif
-              if (queue_flag)
-              {
-		int diff;
-		result = scsi_track_queue_full(cmd->device,
-			       	aic_dev->active_cmds);
-		if ( result < 0 )
-		{
-                  if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
-                    printk(INFO_LEAD "Tagged Command Queueing disabled.\n",
-			p->host_no, CTL_OF_SCB(scb));
-		  diff = aic_dev->max_q_depth - p->host->cmd_per_lun;
-		  aic_dev->temp_q_depth = 1;
-		  aic_dev->max_q_depth = 1;
-		}
-		else if ( result > 0 )
-		{
-                  if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
-                    printk(INFO_LEAD "Queue depth reduced to %d\n", p->host_no,
-                      CTL_OF_SCB(scb), result);
-		  diff = aic_dev->max_q_depth - result;
-		  aic_dev->max_q_depth = result;
-		  /* temp_q_depth could have been dropped to 1 for an untagged
-		   * command that might be coming up */
-		  if(aic_dev->temp_q_depth > result)
-		    aic_dev->temp_q_depth = result;
-		}
-		/* We should free up the now unused SCB entries.  But that's
-		 * a difficult thing to do because we use a direct indexed
-		 * array, so we can't just take any entries and free them;
-		 * we *have* to free the ones at the end of the array, and
-		 * they very well could be in use right now, which means
-		 * in order to do this right, we have to add a delayed
-		 * freeing mechanism tied into the scb_free() code area.
-		 * We'll add that later.
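
A simplified model of the depth adjustment that follows a QUEUE FULL: the usable depth becomes roughly what the target actually had in flight, with untagged operation at depth one as the floor.  The real decision is delegated to scsi_track_queue_full(), which also damps flip-flopping; this sketch deliberately ignores that.

  #include <stdio.h>

  static int on_queue_full(int *max_depth, int *temp_depth, int active)
  {
      int new_depth = active > 1 ? active : 1;

      *max_depth = new_depth;
      if (*temp_depth > new_depth)   /* may already be 1 for an untagged cmd */
          *temp_depth = new_depth;
      return new_depth;
  }

  int main(void)
  {
      int max_depth = 32, temp_depth = 32;
      int depth = on_queue_full(&max_depth, &temp_depth, 12);

      printf("depth now %d (max=%d temp=%d)\n", depth, max_depth, temp_depth);
      return 0;
  }
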
-		 */ -	      } -              break; -            } -             -            default: -              if (aic7xxx_verbose & VERBOSE_SEQINT) -                printk(INFO_LEAD "Unexpected target status 0x%x.\n", p->host_no, -                     CTL_OF_SCB(scb), scb->hscb->target_status); -              if (!aic7xxx_error(cmd)) -              { -                aic7xxx_error(cmd) = DID_RETRY_COMMAND; -              } -              break; -          }  /* end switch */ -        }  /* end else of */ -      } -      break; - -    case AWAITING_MSG: -      { -        unsigned char scb_index, msg_out; - -        scb_index = aic_inb(p, SCB_TAG); -        msg_out = aic_inb(p, MSG_OUT); -        scb = p->scb_data->scb_array[scb_index]; -	aic_dev = AIC_DEV(scb->cmd); -        p->msg_index = p->msg_len = 0; -        /* -         * This SCB had a MK_MESSAGE set in its control byte informing -         * the sequencer that we wanted to send a special message to -         * this target. -         */ - -        if ( !(scb->flags & SCB_DEVICE_RESET) && -              (msg_out == MSG_IDENTIFYFLAG) && -              (scb->hscb->control & TAG_ENB) ) -        { -          p->msg_buf[p->msg_index++] = scb->tag_action; -          p->msg_buf[p->msg_index++] = scb->hscb->tag; -          p->msg_len += 2; -        } - -        if (scb->flags & SCB_DEVICE_RESET) -        { -          p->msg_buf[p->msg_index++] = MSG_BUS_DEV_RESET; -          p->msg_len++; -          if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -            printk(INFO_LEAD "Bus device reset mailed.\n", -                 p->host_no, CTL_OF_SCB(scb)); -        } -        else if (scb->flags & SCB_ABORT) -        { -          if (scb->tag_action) -          { -            p->msg_buf[p->msg_index++] = MSG_ABORT_TAG; -          } -          else -          { -            p->msg_buf[p->msg_index++] = MSG_ABORT; -          } -          p->msg_len++; -          if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS) -            printk(INFO_LEAD "Abort message mailed.\n", p->host_no, -              CTL_OF_SCB(scb)); -        } -        else if (scb->flags & SCB_MSGOUT_PPR) -        { -          if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) -          { -            printk(INFO_LEAD "Sending PPR (%d/%d/%d/%d) message.\n", -                   p->host_no, CTL_OF_SCB(scb), -                   aic_dev->goal.period, -                   aic_dev->goal.offset, -                   aic_dev->goal.width, -                   aic_dev->goal.options); -          } -          aic7xxx_construct_ppr(p, scb); -        } -        else if (scb->flags & SCB_MSGOUT_WDTR) -        { -          if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) -          { -            printk(INFO_LEAD "Sending WDTR message.\n", p->host_no, -                   CTL_OF_SCB(scb)); -          } -          aic7xxx_construct_wdtr(p, aic_dev->goal.width); -        } -        else if (scb->flags & SCB_MSGOUT_SDTR) -        { -          unsigned int max_sync, period; -          unsigned char options = 0; -          /* -           * Now that the device is selected, use the bits in SBLKCTL and -           * SSTAT2 to determine the max sync rate for this device. 
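
What ends up in the message-out buffer in this AWAITING_MSG path is small: a single byte for a bus device reset or abort, or two bytes (queue tag type, then the SCB's tag) for a tagged command.  A standalone sketch using the standard SCSI message codes; the helper itself is illustrative:

  #include <stdio.h>

  #define MSG_SIMPLE_Q_TAG  0x20
  #define MSG_BUS_DEV_RESET 0x0c

  static int build_msg_out(unsigned char *buf, int tagged,
                           unsigned char tag_action, unsigned char tag,
                           int device_reset)
  {
      int len = 0;

      if (device_reset) {
          buf[len++] = MSG_BUS_DEV_RESET;  /* single-byte message */
          return len;
      }
      if (tagged) {
          buf[len++] = tag_action;  /* simple/ordered/head-of-queue tag */
          buf[len++] = tag;         /* identifies the SCB on reconnect */
      }
      return len;
  }

  int main(void)
  {
      unsigned char msg[4];
      int n = build_msg_out(msg, 1, MSG_SIMPLE_Q_TAG, 7, 0), i;

      for (i = 0; i < n; i++)
          printf("%02x ", msg[i]);  /* 20 07 */
      printf("\n");
      return 0;
  }
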
-           */
-          if (p->features & AHC_ULTRA2)
-          {
-            if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
-                !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
-            {
-              max_sync = AHC_SYNCRATE_ULTRA2;
-            }
-            else
-            {
-              max_sync = AHC_SYNCRATE_ULTRA;
-            }
-          }
-          else if (p->features & AHC_ULTRA)
-          {
-            max_sync = AHC_SYNCRATE_ULTRA;
-          }
-          else
-          {
-            max_sync = AHC_SYNCRATE_FAST;
-          }
-          period = aic_dev->goal.period;
-          aic7xxx_find_syncrate(p, &period, max_sync, &options);
-          if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
-          {
-            printk(INFO_LEAD "Sending SDTR %d/%d message.\n", p->host_no,
-                   CTL_OF_SCB(scb), period,
-                   aic_dev->goal.offset);
-          }
-          aic7xxx_construct_sdtr(p, period, aic_dev->goal.offset);
-        }
-        else
-        {
-          panic("aic7xxx: AWAITING_MSG for an SCB that does "
-                "not have a waiting message.\n");
-        }
-        /*
-         * We've set everything up to send our message; now, to actually do
-         * so, we need to enable reqinit interrupts and let the interrupt
-         * handler do the rest.  We don't want to unpause the sequencer yet
-         * though so we'll return early.  We also have to make sure that
-         * we clear the SEQINT *BEFORE* we set the REQINIT handler active
-         * or else it's possible on VLB cards to lose the first REQINIT
-         * interrupt.  Edge triggered EISA cards could also lose this
-         * interrupt, although PCI and level triggered cards should not
-         * have this problem since they continually interrupt the kernel
-         * until we take care of the situation.
-         */
-        scb->flags |= SCB_MSGOUT_SENT;
-        p->msg_index = 0;
-        p->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
-        p->flags |= AHC_HANDLING_REQINITS;
-        aic_outb(p, aic_inb(p, SIMODE1) | ENREQINIT, SIMODE1);
-        return;
-      }
-      break;
-
-    case DATA_OVERRUN:
-      {
-        unsigned char scb_index = aic_inb(p, SCB_TAG);
-        unsigned char lastphase = aic_inb(p, LASTPHASE);
-        unsigned int i;
-
-        scb = (p->scb_data->scb_array[scb_index]);
-        /*
-         * XXX - What do we really want to do on an overrun?  The
-         *       mid-level SCSI code should handle this, but for now,
-         *       we'll just indicate that the command should be retried.
-         *    If we retrieved sense info on this target, then the
-         *    base SENSE info should have been saved prior to the
-         *    overrun error.  In that case, we return DID_OK and let
-         *    the mid level code pick up on the sense info.  Otherwise
-         *    we return DID_ERROR so the command will get retried.
-         */
-        if ( !(scb->flags & SCB_SENSE) )
-        {
-          printk(WARN_LEAD "Data overrun detected in %s phase, tag %d;\n",
-            p->host_no, CTL_OF_SCB(scb),
-            (lastphase == P_DATAIN) ? "Data-In" : "Data-Out", scb->hscb->tag);
-          printk(KERN_WARNING "  %s seen Data Phase. Length=%d, NumSGs=%d.\n",
-            (aic_inb(p, SEQ_FLAGS) & DPHASE) ?
"Have" : "Haven't", -            scb->sg_length, scb->sg_count); -          printk(KERN_WARNING "  Raw SCSI Command: 0x"); -          for (i = 0; i < scb->hscb->SCSI_cmd_length; i++) -          { -            printk("%02x ", scb->cmd->cmnd[i]); -          } -          printk("\n"); -          if(aic7xxx_verbose > 0xffff) -          { -            for (i = 0; i < scb->sg_count; i++) -            { -              printk(KERN_WARNING "     sg[%d] - Addr 0x%x : Length %d\n", -                 i,  -                 le32_to_cpu(scb->sg_list[i].address), -                 le32_to_cpu(scb->sg_list[i].length) ); -            } -          } -          aic7xxx_error(scb->cmd) = DID_ERROR; -        } -        else -          printk(INFO_LEAD "Data Overrun during SEND_SENSE operation.\n", -            p->host_no, CTL_OF_SCB(scb)); -      } -      break; - -    case WIDE_RESIDUE: -      { -        unsigned char resid_sgcnt, index; -        unsigned char scb_index = aic_inb(p, SCB_TAG); -        unsigned int cur_addr, resid_dcnt; -        unsigned int native_addr, native_length, sg_addr; -        int i; - -        if(scb_index > p->scb_data->numscbs) -        { -          printk(WARN_LEAD "invalid scb_index during WIDE_RESIDUE.\n", -            p->host_no, -1, -1, -1); -          /* -           * XXX: Add error handling here -           */ -          break; -        } -        scb = p->scb_data->scb_array[scb_index]; -        if(!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL)) -        { -          printk(WARN_LEAD "invalid scb during WIDE_RESIDUE flags:0x%x " -                 "scb->cmd:0x%lx\n", p->host_no, CTL_OF_SCB(scb), -                 scb->flags, (unsigned long)scb->cmd); -          break; -        } -        if(aic7xxx_verbose & VERBOSE_MINOR_ERROR) -          printk(INFO_LEAD "Got WIDE_RESIDUE message, patching up data " -                 "pointer.\n", p->host_no, CTL_OF_SCB(scb)); - -        /* -         * We have a valid scb to use on this WIDE_RESIDUE message, so -         * we need to walk the sg list looking for this particular sg -         * segment, then see if we happen to be at the very beginning of -         * the segment.  If we are, then we have to back things up to -         * the previous segment.  If not, then we simply need to remove -         * one byte from this segments address and add one to the byte -         * count. -         */ -        cur_addr = aic_inb(p, SHADDR) | (aic_inb(p, SHADDR + 1) << 8) | -          (aic_inb(p, SHADDR + 2) << 16) | (aic_inb(p, SHADDR + 3) << 24); -        sg_addr = aic_inb(p, SG_COUNT + 1) | (aic_inb(p, SG_COUNT + 2) << 8) | -          (aic_inb(p, SG_COUNT + 3) << 16) | (aic_inb(p, SG_COUNT + 4) << 24); -        resid_sgcnt = aic_inb(p, SCB_RESID_SGCNT); -        resid_dcnt = aic_inb(p, SCB_RESID_DCNT) | -          (aic_inb(p, SCB_RESID_DCNT + 1) << 8) | -          (aic_inb(p, SCB_RESID_DCNT + 2) << 16); -        index = scb->sg_count - ((resid_sgcnt) ? resid_sgcnt : 1); -        native_addr = le32_to_cpu(scb->sg_list[index].address); -        native_length = le32_to_cpu(scb->sg_list[index].length); -        /* -         * If resid_dcnt == native_length, then we just loaded this SG -         * segment and we need to back it up one... -         */ -        if(resid_dcnt == native_length) -        { -          if(index == 0) -          { -            /* -             * Oops, this isn't right, we can't back up to before the -             * beginning.  This must be a bogus message, ignore it. 
-             */ -            break; -          } -          resid_dcnt = 1; -          resid_sgcnt += 1; -          native_addr = le32_to_cpu(scb->sg_list[index - 1].address); -          native_length = le32_to_cpu(scb->sg_list[index - 1].length); -          cur_addr = native_addr + (native_length - 1); -          sg_addr -= sizeof(struct hw_scatterlist); -        } -        else -        { -          /* -           * resid_dcnt != native_length, so we are in the middle of a SG -           * element.  Back it up one byte and leave the rest alone. -           */ -          resid_dcnt += 1; -          cur_addr -= 1; -        } -         -        /* -         * Output the new addresses and counts to the right places on the -         * card. -         */ -        aic_outb(p, resid_sgcnt, SG_COUNT); -        aic_outb(p, resid_sgcnt, SCB_RESID_SGCNT); -        aic_outb(p, sg_addr & 0xff, SG_COUNT + 1); -        aic_outb(p, (sg_addr >> 8) & 0xff, SG_COUNT + 2); -        aic_outb(p, (sg_addr >> 16) & 0xff, SG_COUNT + 3); -        aic_outb(p, (sg_addr >> 24) & 0xff, SG_COUNT + 4); -        aic_outb(p, resid_dcnt & 0xff, SCB_RESID_DCNT); -        aic_outb(p, (resid_dcnt >> 8) & 0xff, SCB_RESID_DCNT + 1); -        aic_outb(p, (resid_dcnt >> 16) & 0xff, SCB_RESID_DCNT + 2); - -        /* -         * The sequencer actually wants to find the new address -         * in the SHADDR register set.  On the Ultra2 and later controllers -         * this register set is readonly.  In order to get the right number -         * into the register, you actually have to enter it in HADDR and then -         * use the PRELOADEN bit of DFCNTRL to drop it through from the -         * HADDR register to the SHADDR register.  On non-Ultra2 controllers, -         * we simply write it direct. -         */ -        if(p->features & AHC_ULTRA2) -        { -          /* -           * We might as well be accurate and drop both the resid_dcnt and -           * cur_addr into HCNT and HADDR and have both of them drop -           * through to the shadow layer together. 
-           */ -          aic_outb(p, resid_dcnt & 0xff, HCNT); -          aic_outb(p, (resid_dcnt >> 8) & 0xff, HCNT + 1); -          aic_outb(p, (resid_dcnt >> 16) & 0xff, HCNT + 2); -          aic_outb(p, cur_addr & 0xff, HADDR); -          aic_outb(p, (cur_addr >> 8) & 0xff, HADDR + 1); -          aic_outb(p, (cur_addr >> 16) & 0xff, HADDR + 2); -          aic_outb(p, (cur_addr >> 24) & 0xff, HADDR + 3); -          aic_outb(p, aic_inb(p, DMAPARAMS) | PRELOADEN, DFCNTRL); -          udelay(1); -          aic_outb(p, aic_inb(p, DMAPARAMS) & ~(SCSIEN|HDMAEN), DFCNTRL); -          i=0; -          while(((aic_inb(p, DFCNTRL) & (SCSIEN|HDMAEN)) != 0) && (i++ < 1000)) -          { -            udelay(1); -          } -        } -        else -        { -          aic_outb(p, cur_addr & 0xff, SHADDR); -          aic_outb(p, (cur_addr >> 8) & 0xff, SHADDR + 1); -          aic_outb(p, (cur_addr >> 16) & 0xff, SHADDR + 2); -          aic_outb(p, (cur_addr >> 24) & 0xff, SHADDR + 3); -        } -      } -      break; - -    case SEQ_SG_FIXUP: -    { -      unsigned char scb_index, tmp; -      int sg_addr, sg_length; - -      scb_index = aic_inb(p, SCB_TAG); - -      if(scb_index > p->scb_data->numscbs) -      { -        printk(WARN_LEAD "invalid scb_index during SEQ_SG_FIXUP.\n", -          p->host_no, -1, -1, -1); -        printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 " -           "0x%x\n", p->host_no, -1, -1, -1, -           aic_inb(p, SCSISIGI), -           aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), -           aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); -        printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n", -           p->host_no, -1, -1, -1, aic_inb(p, SG_CACHEPTR), -           aic_inb(p, SSTAT2), aic_inb(p, STCNT + 2) << 16 | -           aic_inb(p, STCNT + 1) << 8 | aic_inb(p, STCNT)); -        /* -         * XXX: Add error handling here -         */ -        break; -      } -      scb = p->scb_data->scb_array[scb_index]; -      if(!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL)) -      { -        printk(WARN_LEAD "invalid scb during SEQ_SG_FIXUP flags:0x%x " -               "scb->cmd:0x%p\n", p->host_no, CTL_OF_SCB(scb), -               scb->flags, scb->cmd); -        printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 " -           "0x%x\n", p->host_no, CTL_OF_SCB(scb), -           aic_inb(p, SCSISIGI), -           aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), -           aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); -        printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n", -           p->host_no, CTL_OF_SCB(scb), aic_inb(p, SG_CACHEPTR), -           aic_inb(p, SSTAT2), aic_inb(p, STCNT + 2) << 16 | -           aic_inb(p, STCNT + 1) << 8 | aic_inb(p, STCNT)); -        break; -      } -      if(aic7xxx_verbose & VERBOSE_MINOR_ERROR) -        printk(INFO_LEAD "Fixing up SG address for sequencer.\n", p->host_no, -               CTL_OF_SCB(scb)); -      /* -       * Advance the SG pointer to the next element in the list -       */ -      tmp = aic_inb(p, SG_NEXT); -      tmp += SG_SIZEOF; -      aic_outb(p, tmp, SG_NEXT); -      if( tmp < SG_SIZEOF ) -        aic_outb(p, aic_inb(p, SG_NEXT + 1) + 1, SG_NEXT + 1); -      tmp = aic_inb(p, SG_COUNT) - 1; -      aic_outb(p, tmp, SG_COUNT); -      sg_addr = le32_to_cpu(scb->sg_list[scb->sg_count - tmp].address); -      sg_length = le32_to_cpu(scb->sg_list[scb->sg_count - tmp].length); -      /* -       * Now stuff the element we just advanced past down onto the -       * 
card so it can be stored in the residual area. -       */ -      aic_outb(p, sg_addr & 0xff, HADDR); -      aic_outb(p, (sg_addr >> 8) & 0xff, HADDR + 1); -      aic_outb(p, (sg_addr >> 16) & 0xff, HADDR + 2); -      aic_outb(p, (sg_addr >> 24) & 0xff, HADDR + 3); -      aic_outb(p, sg_length & 0xff, HCNT); -      aic_outb(p, (sg_length >> 8) & 0xff, HCNT + 1); -      aic_outb(p, (sg_length >> 16) & 0xff, HCNT + 2); -      aic_outb(p, (tmp << 2) | ((tmp == 1) ? LAST_SEG : 0), SG_CACHEPTR); -      aic_outb(p, aic_inb(p, DMAPARAMS), DFCNTRL); -      while(aic_inb(p, SSTAT0) & SDONE) udelay(1); -      while(aic_inb(p, DFCNTRL) & (HDMAEN|SCSIEN)) aic_outb(p, 0, DFCNTRL); -    } -    break; - -#ifdef AIC7XXX_NOT_YET  -    case TRACEPOINT2: -      { -        printk(INFO_LEAD "Tracepoint #2 reached.\n", p->host_no, -               channel, target, lun); -      } -      break; - -    /* XXX Fill these in later */ -    case MSG_BUFFER_BUSY: -      printk("aic7xxx: Message buffer busy.\n"); -      break; -    case MSGIN_PHASEMIS: -      printk("aic7xxx: Message-in phasemis.\n"); -      break; -#endif - -    default:                   /* unknown */ -      printk(WARN_LEAD "Unknown SEQINT, INTSTAT 0x%x, SCSISIGI 0x%x.\n", -             p->host_no, channel, target, lun, intstat, -             aic_inb(p, SCSISIGI)); -      break; -  } - -  /* -   * Clear the sequencer interrupt and unpause the sequencer. -   */ -  unpause_sequencer(p, /* unpause always */ TRUE); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_parse_msg - * - * Description: - *   Parses incoming messages into actions on behalf of - *   aic7xxx_handle_reqinit - *_F*************************************************************************/ -static int -aic7xxx_parse_msg(struct aic7xxx_host *p, struct aic7xxx_scb *scb) -{ -  int reject, reply, done; -  unsigned char target_scsirate, tindex; -  unsigned short target_mask; -  unsigned char target, channel, lun; -  unsigned char bus_width, new_bus_width; -  unsigned char trans_options, new_trans_options; -  unsigned int period, new_period, offset, new_offset, maxsync; -  struct aic7xxx_syncrate *syncrate; -  struct aic_dev_data *aic_dev; - -  target = scb->cmd->device->id; -  channel = scb->cmd->device->channel; -  lun = scb->cmd->device->lun; -  reply = reject = done = FALSE; -  tindex = TARGET_INDEX(scb->cmd); -  aic_dev = AIC_DEV(scb->cmd); -  target_scsirate = aic_inb(p, TARG_SCSIRATE + tindex); -  target_mask = (0x01 << tindex); - -  /* -   * Parse as much of the message as is available, -   * rejecting it if we don't support it.  When -   * the entire message is available and has been -   * handled, return TRUE indicating that we have -   * parsed an entire message. -   */ - -  if (p->msg_buf[0] != MSG_EXTENDED) -  { -    reject = TRUE; -  } - -  /* -   * Even if we are an Ultra3 card, don't allow Ultra3 sync rates when -   * using the SDTR messages.  We need the PPR messages to enable the -   * higher speeds that include things like Dual Edge clocking. 
-   */
-  if (p->features & AHC_ULTRA2)
-  {
-    if ( (aic_inb(p, SBLKCTL) & ENAB40) &&
-         !(aic_inb(p, SSTAT2) & EXP_ACTIVE) )
-    {
-      if (p->features & AHC_ULTRA3)
-        maxsync = AHC_SYNCRATE_ULTRA3;
-      else
-        maxsync = AHC_SYNCRATE_ULTRA2;
-    }
-    else
-    {
-      maxsync = AHC_SYNCRATE_ULTRA;
-    }
-  }
-  else if (p->features & AHC_ULTRA)
-  {
-    maxsync = AHC_SYNCRATE_ULTRA;
-  }
-  else
-  {
-    maxsync = AHC_SYNCRATE_FAST;
-  }
-
-  /*
-   * Just accept the length byte outright and perform
-   * more checking once we know the message type.
-   */
-
-  if ( !reject && (p->msg_len > 2) )
-  {
-    switch(p->msg_buf[2])
-    {
-      case MSG_EXT_SDTR:
-      {
-
-        if (p->msg_buf[1] != MSG_EXT_SDTR_LEN)
-        {
-          reject = TRUE;
-          break;
-        }
-
-        if (p->msg_len < (MSG_EXT_SDTR_LEN + 2))
-        {
-          break;
-        }
-
-        period = new_period = p->msg_buf[3];
-        offset = new_offset = p->msg_buf[4];
-        trans_options = new_trans_options = 0;
-        bus_width = new_bus_width = target_scsirate & WIDEXFER;
-
-        /*
-         * If our current max syncrate is in the Ultra3 range, bump it back
-         * down to Ultra2 since we can't negotiate DT transfers using SDTR.
-         */
-        if(maxsync == AHC_SYNCRATE_ULTRA3)
-          maxsync = AHC_SYNCRATE_ULTRA2;
-
-        /*
-         * We might have a device that is starting negotiation with us
-         * before we can start up negotiation with it....be prepared to
-         * have a device ask for a higher speed than we want to give it
-         * in that case.
-         */
-        if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR)) !=
-             (SCB_MSGOUT_SENT|SCB_MSGOUT_SDTR) )
-        {
-          if (!(aic_dev->flags & DEVICE_DTR_SCANNED))
-          {
-            /*
-             * We shouldn't get here unless this is a narrow drive; wide
-             * devices should trigger this same section of code in the WDTR
-             * handler first instead.
-             */
-            aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT;
-            aic_dev->goal.options = 0;
-            if(p->user[tindex].offset)
-            {
-              aic_dev->needsdtr_copy = 1;
-              aic_dev->goal.period = max_t(unsigned char, 10, p->user[tindex].period);
-              if(p->features & AHC_ULTRA2)
-              {
-                aic_dev->goal.offset = MAX_OFFSET_ULTRA2;
-              }
-              else
-              {
-                aic_dev->goal.offset = MAX_OFFSET_8BIT;
-              }
-            }
-            else
-            {
-              aic_dev->needsdtr_copy = 0;
-              aic_dev->goal.period = 255;
-              aic_dev->goal.offset = 0;
-            }
-            aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR;
-          }
-          else if (aic_dev->needsdtr_copy == 0)
-          {
-            /*
-             * This is a preemptive message from the target; we've already
-             * scanned this target and set our options for it, and we
-             * don't need an SDTR with this target (for whatever reason),
-             * so reject this incoming SDTR.
-             */
-            reject = TRUE;
-            break;
-          }
-
-          /* The device is sending this message first and we have to reply */
-          reply = TRUE;
-
-          if (aic7xxx_verbose & VERBOSE_NEGOTIATION2)
-          {
-            printk(INFO_LEAD "Received pre-emptive SDTR message from "
-                   "target.\n", p->host_no, CTL_OF_SCB(scb));
-          }
-          /*
-           * Validate the values the device passed to us against our SEEPROM
-           * settings.  We don't have to do this if we aren't replying since
-           * the device isn't allowed to send values greater than the ones
-           * we first sent to it.
-           */
-          new_period = max_t(unsigned int, period, aic_dev->goal.period);
-          new_offset = min_t(unsigned int, offset, aic_dev->goal.offset);
-        }
-
-        /*
-         * Use our new_period, new_offset, bus_width, and card options
-         * to determine the actual syncrate settings.
-         */
-        syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
-                                         &trans_options);
-        aic7xxx_validate_offset(p, syncrate, &new_offset, bus_width);
-
-        /*
-         * Did we drop to async?  If so, send a reply regardless of whether
-         * or not we initiated this negotiation.
-         */
-        if ((new_offset == 0) && (new_offset != offset))
-        {
-          aic_dev->needsdtr_copy = 0;
-          reply = TRUE;
-        }
-
-        /*
-         * If we didn't start this negotiation, or if we went too low and
-         * had to go async, then send an SDTR back to the target.
-         */
-        if(reply)
-        {
-          /* when sending a reply, make sure that the goal settings are
-           * updated along with current and active since the code that
-           * will actually build the message for the sequencer uses the
-           * goal settings as its guidelines.
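
The max_t/min_t clamp above is the entire validation of a pre-emptive SDTR: a period factor may only be raised (slower) and an offset only lowered.  A tiny standalone illustration of the same clamp:

  #include <stdio.h>

  static void clamp_sdtr(unsigned goal_period, unsigned goal_offset,
                         unsigned *period, unsigned *offset)
  {
      if (*period < goal_period)
          *period = goal_period;   /* larger factor == slower == safer */
      if (*offset > goal_offset)
          *offset = goal_offset;
  }

  int main(void)
  {
      unsigned period = 9, offset = 127;  /* target asks for more than we give */

      clamp_sdtr(12, 15, &period, &offset);
      printf("reply with period=%u offset=%u\n", period, offset);
      return 0;
  }
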
-           */ -          aic7xxx_set_syncrate(p, syncrate, target, channel, new_period, -                               new_offset, trans_options, -                               AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR, -			       aic_dev); -          scb->flags &= ~SCB_MSGOUT_BITS; -          scb->flags |= SCB_MSGOUT_SDTR; -          aic_outb(p, HOST_MSG, MSG_OUT); -          aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO); -        } -        else -        { -          aic7xxx_set_syncrate(p, syncrate, target, channel, new_period, -                               new_offset, trans_options, -                               AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev); -          aic_dev->needsdtr = 0; -        } -        done = TRUE; -        break; -      } -      case MSG_EXT_WDTR: -      { -           -        if (p->msg_buf[1] != MSG_EXT_WDTR_LEN) -        { -          reject = TRUE; -          break; -        } - -        if (p->msg_len < (MSG_EXT_WDTR_LEN + 2)) -        { -          break; -        } - -        bus_width = new_bus_width = p->msg_buf[3]; - -        if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR)) == -             (SCB_MSGOUT_SENT|SCB_MSGOUT_WDTR) ) -        { -          switch(bus_width) -          { -            default: -            { -              reject = TRUE; -              if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && -                   ((aic_dev->flags & DEVICE_PRINT_DTR) || -                    (aic7xxx_verbose > 0xffff)) ) -              { -                printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n", -                  p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width)); -              } -            } /* We fall through on purpose */ -            case MSG_EXT_WDTR_BUS_8_BIT: -            { -              aic_dev->goal.width = MSG_EXT_WDTR_BUS_8_BIT; -              aic_dev->needwdtr_copy &= ~target_mask; -              break; -            } -            case MSG_EXT_WDTR_BUS_16_BIT: -            { -              break; -            } -          } -          aic_dev->needwdtr = 0; -          aic7xxx_set_width(p, target, channel, lun, new_bus_width, -                            AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev); -        } -        else -        { -          if ( !(aic_dev->flags & DEVICE_DTR_SCANNED) ) -          { -            /*  -             * Well, we now know the WDTR and SYNC caps of this device since -             * it contacted us first, mark it as such and copy the user stuff -             * over to the goal stuff. 
-             */ -            if( (p->features & AHC_WIDE) && p->user[tindex].width ) -            { -              aic_dev->goal.width = MSG_EXT_WDTR_BUS_16_BIT; -              aic_dev->needwdtr_copy = 1; -            } -             -            /* -             * Devices that support DT transfers don't start WDTR requests -             */ -            aic_dev->goal.options = 0; - -            if(p->user[tindex].offset) -            { -              aic_dev->needsdtr_copy = 1; -              aic_dev->goal.period = max_t(unsigned char, 10, p->user[tindex].period); -              if(p->features & AHC_ULTRA2) -              { -                aic_dev->goal.offset = MAX_OFFSET_ULTRA2; -              } -              else if( aic_dev->goal.width ) -              { -                aic_dev->goal.offset = MAX_OFFSET_16BIT; -              } -              else -              { -                aic_dev->goal.offset = MAX_OFFSET_8BIT; -              } -            } else { -              aic_dev->needsdtr_copy = 0; -              aic_dev->goal.period = 255; -              aic_dev->goal.offset = 0; -            } -             -            aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR; -          } -          else if (aic_dev->needwdtr_copy == 0) -          { -            /* -             * This is a preemptive message from the target, we've already -             * scanned this target and set our options for it, and we -             * don't need a WDTR with this target (for whatever reason), -             * so reject this incoming WDTR -             */ -            reject = TRUE; -            break; -          } - -          /* The device is sending this message first and we have to reply */ -          reply = TRUE; - -          if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) -          { -            printk(INFO_LEAD "Received pre-emptive WDTR message from " -                   "target.\n", p->host_no, CTL_OF_SCB(scb)); -          } -          switch(bus_width) -          { -            case MSG_EXT_WDTR_BUS_16_BIT: -            { -              if ( (p->features & AHC_WIDE) && -                   (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) ) -              { -                new_bus_width = MSG_EXT_WDTR_BUS_16_BIT; -                break; -              } -            } /* Fall through if we aren't a wide card */ -            default: -            case MSG_EXT_WDTR_BUS_8_BIT: -            { -              aic_dev->needwdtr_copy = 0; -              new_bus_width = MSG_EXT_WDTR_BUS_8_BIT; -              break; -            } -          } -          scb->flags &= ~SCB_MSGOUT_BITS; -          scb->flags |= SCB_MSGOUT_WDTR; -          aic_dev->needwdtr = 0; -          if(aic_dev->dtr_pending == 0) -          { -            /* there is no other command with SCB_DTR_SCB already set that will -             * trigger the release of the dtr_pending bit.  Both set the bit -             * and set scb->flags |= SCB_DTR_SCB -             */ -            aic_dev->dtr_pending = 1; -            scb->flags |= SCB_DTR_SCB; -          } -          aic_outb(p, HOST_MSG, MSG_OUT); -          aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO); -          /* when sending a reply, make sure that the goal settings are -           * updated along with current and active since the code that -           * will actually build the message for the sequencer uses the -           * goal settings as its guidelines. 
-           */
-          aic7xxx_set_width(p, target, channel, lun, new_bus_width,
-                          AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
-			  aic_dev);
-        }
-         
-        /*
-         * By virtue of the SCSI spec, a WDTR message negates any existing
-         * SDTR negotiations.  So, even if needsdtr isn't marked for this
-         * device, we still have to do a new SDTR message if the device
-         * supports SDTR at all.  Therefore, we check needsdtr_copy instead
-         * of needsdtr.
-         */
-        aic7xxx_set_syncrate(p, NULL, target, channel, 0, 0, 0,
-                             AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE,
-			     aic_dev);
-        aic_dev->needsdtr = aic_dev->needsdtr_copy;
-        done = TRUE;
-        break;
-      }
-      case MSG_EXT_PPR:
-      {
-         
-        if (p->msg_buf[1] != MSG_EXT_PPR_LEN)
-        {
-          reject = TRUE;
-          break;
-        }
-
-        if (p->msg_len < (MSG_EXT_PPR_LEN + 2))
-        {
-          break;
-        }
-
-        period = new_period = p->msg_buf[3];
-        offset = new_offset = p->msg_buf[5];
-        bus_width = new_bus_width = p->msg_buf[6];
-        trans_options = new_trans_options = p->msg_buf[7] & 0xf;
-
-        if(aic7xxx_verbose & VERBOSE_NEGOTIATION2)
-        {
-          printk(INFO_LEAD "Parsing PPR message (%d/%d/%d/%d)\n",
-                 p->host_no, CTL_OF_SCB(scb), period, offset, bus_width,
-                 trans_options);
-        }
-
-        /*
-         * We might have a device that is starting negotiation with us
-         * before we can start up negotiation with it....be prepared to
-         * have a device ask for a higher speed than we want to give it
-         * in that case
-         */
-        if ( (scb->flags & (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR)) !=
-             (SCB_MSGOUT_SENT|SCB_MSGOUT_PPR) )
-        { 
-          /* Have we scanned the device yet? */
-          if (!(aic_dev->flags & DEVICE_DTR_SCANNED))
-          {
-            /* The device is electing to use PPR messages, so we will too until
-             * we know better */
-            aic_dev->needppr = aic_dev->needppr_copy = 1;
-            aic_dev->needsdtr = aic_dev->needsdtr_copy = 0;
-            aic_dev->needwdtr = aic_dev->needwdtr_copy = 0;
-           
-            /* We know the device is SCSI-3 compliant due to PPR */
-            aic_dev->flags |= DEVICE_SCSI_3;
-           
-            /*
-             * Not only is the device starting this up, but it also hasn't
-             * been scanned yet, so this would likely be our TUR or our
-             * INQUIRY command at scan time, so we need to use the
-             * settings from the SEEPROM if they existed.  Of course, even
-             * if we didn't find a SEEPROM, we stuffed default values into
-             * the user settings anyway, so use those in all cases.
-             */ -            aic_dev->goal.width = p->user[tindex].width; -            if(p->user[tindex].offset) -            { -              aic_dev->goal.period = p->user[tindex].period; -              aic_dev->goal.options = p->user[tindex].options; -              if(p->features & AHC_ULTRA2) -              { -                aic_dev->goal.offset = MAX_OFFSET_ULTRA2; -              } -              else if( aic_dev->goal.width && -                       (bus_width == MSG_EXT_WDTR_BUS_16_BIT) && -                       p->features & AHC_WIDE ) -              { -                aic_dev->goal.offset = MAX_OFFSET_16BIT; -              } -              else -              { -                aic_dev->goal.offset = MAX_OFFSET_8BIT; -              } -            } -            else -            { -              aic_dev->goal.period = 255; -              aic_dev->goal.offset = 0; -              aic_dev->goal.options = 0; -            } -            aic_dev->flags |= DEVICE_DTR_SCANNED | DEVICE_PRINT_DTR; -          } -          else if (aic_dev->needppr_copy == 0) -          { -            /* -             * This is a preemptive message from the target, we've already -             * scanned this target and set our options for it, and we -             * don't need a PPR with this target (for whatever reason), -             * so reject this incoming PPR -             */ -            reject = TRUE; -            break; -          } - -          /* The device is sending this message first and we have to reply */ -          reply = TRUE; -           -          if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) -          { -            printk(INFO_LEAD "Received pre-emptive PPR message from " -                   "target.\n", p->host_no, CTL_OF_SCB(scb)); -          } - -        } - -        switch(bus_width) -        { -          case MSG_EXT_WDTR_BUS_16_BIT: -          { -            if ( (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) && -			    p->features & AHC_WIDE) -            { -              break; -            } -          } -          default: -          { -            if ( (aic7xxx_verbose & VERBOSE_NEGOTIATION2) && -                 ((aic_dev->flags & DEVICE_PRINT_DTR) || -                  (aic7xxx_verbose > 0xffff)) ) -            { -              reply = TRUE; -              printk(INFO_LEAD "Requesting %d bit transfers, rejecting.\n", -                p->host_no, CTL_OF_SCB(scb), 8 * (0x01 << bus_width)); -            } -          } /* We fall through on purpose */ -          case MSG_EXT_WDTR_BUS_8_BIT: -          { -            /* -             * According to the spec, if we aren't wide, we also can't be -             * Dual Edge so clear the options byte -             */ -            new_trans_options = 0; -            new_bus_width = MSG_EXT_WDTR_BUS_8_BIT; -            break; -          } -        } - -        if(reply) -        { -          /* when sending a reply, make sure that the goal settings are -           * updated along with current and active since the code that -           * will actually build the message for the sequencer uses the -           * goal settings as its guidelines. 
-           */
-          aic7xxx_set_width(p, target, channel, lun, new_bus_width,
-                            AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
-			    aic_dev);
-          syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
-                                           &new_trans_options);
-          aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width);
-          aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
-                               new_offset, new_trans_options,
-                               AHC_TRANS_GOAL|AHC_TRANS_ACTIVE|AHC_TRANS_CUR,
-			       aic_dev);
-        }
-        else
-        {
-          aic7xxx_set_width(p, target, channel, lun, new_bus_width,
-                            AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
-          syncrate = aic7xxx_find_syncrate(p, &new_period, maxsync,
-                                           &new_trans_options);
-          aic7xxx_validate_offset(p, syncrate, &new_offset, new_bus_width);
-          aic7xxx_set_syncrate(p, syncrate, target, channel, new_period,
-                               new_offset, new_trans_options,
-                               AHC_TRANS_ACTIVE|AHC_TRANS_CUR, aic_dev);
-        }
-
-        /*
-         * As it turns out, if we don't *have* to have PPR messages, then
-         * configure ourselves not to use them since that makes some
-         * external drive chassis work (those chassis can't parse PPR
-         * messages and they mangle the SCSI bus until you send a WDTR
-         * and SDTR that they can understand).
-         */
-        if(new_trans_options == 0)
-        {
-          aic_dev->needppr = aic_dev->needppr_copy = 0;
-          if(new_offset)
-          {
-            aic_dev->needsdtr = aic_dev->needsdtr_copy = 1;
-          }
-          if (new_bus_width)
-          {
-            aic_dev->needwdtr = aic_dev->needwdtr_copy = 1;
-          }
-        }
-
-        if((new_offset == 0) && (offset != 0))
-        {
-          /*
-           * Oops, the syncrate went too low for this card and we fell off
-           * to async (should never happen with a device that uses PPR
-           * messages, but we have to be complete)
-           */
-          reply = TRUE;
-        }
-
-        if(reply)
-        {
-          scb->flags &= ~SCB_MSGOUT_BITS;
-          scb->flags |= SCB_MSGOUT_PPR;
-          aic_outb(p, HOST_MSG, MSG_OUT);
-          aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
-        }
-        else
-        {
-          aic_dev->needppr = 0;
-        }
-        done = TRUE;
-        break;
-      }
-      default:
-      {
-        reject = TRUE;
-        break;
-      }
-    } /* end of switch(p->msg_type) */
-  } /* end of if (!reject && (p->msg_len > 2)) */
-
-  if (!reply && reject)
-  {
-    aic_outb(p, MSG_MESSAGE_REJECT, MSG_OUT);
-    aic_outb(p, aic_inb(p, SCSISIGO) | ATNO, SCSISIGO);
-    done = TRUE;
-  }
-  return(done);
-}
-
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_handle_reqinit
- *
- * Description:
- *   Interrupt handler for REQINIT interrupts (used to transfer messages to
- *    and from devices).
- *-F*************************************************************************/
-static void
-aic7xxx_handle_reqinit(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
-{
-  unsigned char lastbyte;
-  unsigned char phasemis;
-  int done = FALSE;
-
-  switch(p->msg_type)
-  {
-    case MSG_TYPE_INITIATOR_MSGOUT:
-      {
-        if (p->msg_len == 0)
-          panic("aic7xxx: REQINIT with no active message!\n");
-
-        lastbyte = (p->msg_index == (p->msg_len - 1));
-        phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK) != P_MESGOUT;
-
-        if (lastbyte || phasemis)
-        {
-          /* Time to end the message */
-          p->msg_len = 0;
-          p->msg_type = MSG_TYPE_NONE;
-          /*
-           * NOTE-TO-MYSELF: If you clear the REQINIT after you
-           * disable REQINITs, then cases of REJECT_MSG stop working
-           * and hang the bus
-           */
-          aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1);
-          aic_outb(p, CLRSCSIINT, CLRINT);
-          p->flags &= ~AHC_HANDLING_REQINITS;
-
-          if (phasemis == 0)
-          {
-            aic_outb(p, p->msg_buf[p->msg_index], SINDEX);
-            aic_outb(p, 0, RETURN_1);
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
-            if (aic7xxx_verbose > 0xffff)
-              printk(INFO_LEAD "Completed sending of REQINIT message.\n",
-                     p->host_no, CTL_OF_SCB(scb));
-#endif
-          }
-          else
-          {
-            aic_outb(p, MSGOUT_PHASEMIS, RETURN_1);
-#ifdef AIC7XXX_VERBOSE_DEBUGGING
-            if (aic7xxx_verbose > 0xffff)
-              printk(INFO_LEAD "PHASEMIS while sending REQINIT message.\n",
-                     p->host_no, CTL_OF_SCB(scb));
-#endif
-          }
-          unpause_sequencer(p, TRUE);
-        }
-        else
-        {
-          /*
-           * Present the byte on the bus (clearing REQINIT) but don't
-           * unpause the sequencer.
-           */ -          aic_outb(p, CLRREQINIT, CLRSINT1); -          aic_outb(p, CLRSCSIINT, CLRINT); -          aic_outb(p,  p->msg_buf[p->msg_index++], SCSIDATL); -        } -        break; -      } -    case MSG_TYPE_INITIATOR_MSGIN: -      { -        phasemis = ( aic_inb(p, SCSISIGI) & PHASE_MASK ) != P_MESGIN; - -        if (phasemis == 0) -        { -          p->msg_len++; -          /* Pull the byte in without acking it */ -          p->msg_buf[p->msg_index] = aic_inb(p, SCSIBUSL); -          done = aic7xxx_parse_msg(p, scb); -          /* Ack the byte */ -          aic_outb(p, CLRREQINIT, CLRSINT1); -          aic_outb(p, CLRSCSIINT, CLRINT); -          aic_inb(p, SCSIDATL); -          p->msg_index++; -        } -        if (phasemis || done) -        { -#ifdef AIC7XXX_VERBOSE_DEBUGGING -          if (aic7xxx_verbose > 0xffff) -          { -            if (phasemis) -              printk(INFO_LEAD "PHASEMIS while receiving REQINIT message.\n", -                     p->host_no, CTL_OF_SCB(scb)); -            else -              printk(INFO_LEAD "Completed receipt of REQINIT message.\n", -                     p->host_no, CTL_OF_SCB(scb)); -          } -#endif -          /* Time to end our message session */ -          p->msg_len = 0; -          p->msg_type = MSG_TYPE_NONE; -          aic_outb(p, aic_inb(p, SIMODE1) & ~ENREQINIT, SIMODE1); -          aic_outb(p, CLRSCSIINT, CLRINT); -          p->flags &= ~AHC_HANDLING_REQINITS; -          unpause_sequencer(p, TRUE); -        } -        break; -      } -    default: -      { -        panic("aic7xxx: Unknown REQINIT message type.\n"); -        break; -      } -  } /* End of switch(p->msg_type) */ -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_handle_scsiint - * - * Description: - *   Interrupt handler for SCSI interrupts (SCSIINT). - *-F*************************************************************************/ -static void -aic7xxx_handle_scsiint(struct aic7xxx_host *p, unsigned char intstat) -{ -  unsigned char scb_index; -  unsigned char status; -  struct aic7xxx_scb *scb; -  struct aic_dev_data *aic_dev; - -  scb_index = aic_inb(p, SCB_TAG); -  status = aic_inb(p, SSTAT1); - -  if (scb_index < p->scb_data->numscbs) -  { -    scb = p->scb_data->scb_array[scb_index]; -    if ((scb->flags & SCB_ACTIVE) == 0) -    { -      scb = NULL; -    } -  } -  else -  { -    scb = NULL; -  } - - -  if ((status & SCSIRSTI) != 0) -  { -    int channel; - -    if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) -      channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3; -    else -      channel = 0; - -    if (aic7xxx_verbose & VERBOSE_RESET) -      printk(WARN_LEAD "Someone else reset the channel!!\n", -           p->host_no, channel, -1, -1); -    if (aic7xxx_panic_on_abort) -      aic7xxx_panic_abort(p, NULL); -    /* -     * Go through and abort all commands for the channel, but do not -     * reset the channel again. -     */ -    aic7xxx_reset_channel(p, channel, /* Initiate Reset */ FALSE); -    aic7xxx_run_done_queue(p, TRUE); -    scb = NULL; -  } -  else if ( ((status & BUSFREE) != 0) && ((status & SELTO) == 0) ) -  { -    /* -     * First look at what phase we were last in.  If it's message-out, -     * chances are pretty good that the bus free was in response to -     * one of our abort requests. 
-     */
-    unsigned char lastphase = aic_inb(p, LASTPHASE);
-    unsigned char saved_tcl = aic_inb(p, SAVED_TCL);
-    unsigned char target = (saved_tcl >> 4) & 0x0F;
-    int channel;
-    int printerror = TRUE;
-
-    if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 )
-      channel = (aic_inb(p, SBLKCTL) & SELBUSB) >> 3;
-    else
-      channel = 0;
-
-    aic_outb(p, aic_inb(p, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP),
-             SCSISEQ);
-    if (lastphase == P_MESGOUT)
-    {
-      unsigned char message;
-
-      message = aic_inb(p, SINDEX);
-
-      if ((message == MSG_ABORT) || (message == MSG_ABORT_TAG))
-      {
-        if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)
-          printk(INFO_LEAD "SCB %d abort delivered.\n", p->host_no,
-            CTL_OF_SCB(scb), scb->hscb->tag);
-        aic7xxx_reset_device(p, target, channel, ALL_LUNS,
-                (message == MSG_ABORT) ? SCB_LIST_NULL : scb->hscb->tag );
-        aic7xxx_run_done_queue(p, TRUE);
-        scb = NULL;
-        printerror = 0;
-      }
-      else if (message == MSG_BUS_DEV_RESET)
-      {
-        aic7xxx_handle_device_reset(p, target, channel);
-        scb = NULL;
-        printerror = 0;
-      }
-    }
-    if ( (scb != NULL) && (scb->flags & SCB_DTR_SCB) ) 
-    {
-      /*
-       * Hmmm...error during a negotiation command.  Either we have a
-       * broken bus, or the device doesn't like our negotiation message.
-       * Since we check the INQUIRY data of a device before sending it
-       * negotiation messages, assume the bus is broken for whatever
-       * reason.  Complete the command.
-       */
-      printerror = 0;
-      aic7xxx_reset_device(p, target, channel, ALL_LUNS, scb->hscb->tag);
-      aic7xxx_run_done_queue(p, TRUE);
-      scb = NULL;
-    }
-    if (printerror != 0)
-    {
-      if (scb != NULL)
-      {
-        unsigned char tag;
-
-        if ((scb->hscb->control & TAG_ENB) != 0)
-        {
-          tag = scb->hscb->tag;
-        }
-        else
-        {
-          tag = SCB_LIST_NULL;
-        }
-        aic7xxx_reset_device(p, target, channel, ALL_LUNS, tag);
-        aic7xxx_run_done_queue(p, TRUE);
-      }
-      else
-      {
-        aic7xxx_reset_device(p, target, channel, ALL_LUNS, SCB_LIST_NULL);
-        aic7xxx_run_done_queue(p, TRUE);
-      }
-      printk(INFO_LEAD "Unexpected busfree, LASTPHASE = 0x%x, "
-             "SEQADDR = 0x%x\n", p->host_no, channel, target, -1, lastphase,
-             (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0));
-      scb = NULL;
-    }
-    aic_outb(p, MSG_NOOP, MSG_OUT);
-    aic_outb(p, aic_inb(p, SIMODE1) & ~(ENBUSFREE|ENREQINIT),
-      SIMODE1);
-    p->flags &= ~AHC_HANDLING_REQINITS;
-    aic_outb(p, CLRBUSFREE, CLRSINT1);
-    aic_outb(p, CLRSCSIINT, CLRINT);
-    restart_sequencer(p);
-    unpause_sequencer(p, TRUE);
-  }
-  else if ((status & SELTO) != 0)
-  {
-	unsigned char scbptr;
-	unsigned char nextscb;
-	struct scsi_cmnd *cmd;
-
-    scbptr = aic_inb(p, WAITING_SCBH);
-    if (scbptr > p->scb_data->maxhscbs)
-    {
-      /*
-       * I'm still trying to track down exactly how this happens, but until
-       * I find it, this code will make sure we aren't passing bogus values
-       * into the SCBPTR register, even if that register will just wrap
-       * things around, we still don't like having out of range variables.
-       *
-       * NOTE: Don't check the aic7xxx_verbose variable, I want this message
-       * to always be displayed.
-       */ -      printk(INFO_LEAD "Invalid WAITING_SCBH value %d, improvising.\n", -             p->host_no, -1, -1, -1, scbptr); -      if (p->scb_data->maxhscbs > 4) -        scbptr &= (p->scb_data->maxhscbs - 1); -      else -        scbptr &= 0x03; -    } -    aic_outb(p, scbptr, SCBPTR); -    scb_index = aic_inb(p, SCB_TAG); - -    scb = NULL; -    if (scb_index < p->scb_data->numscbs) -    { -      scb = p->scb_data->scb_array[scb_index]; -      if ((scb->flags & SCB_ACTIVE) == 0) -      { -        scb = NULL; -      } -    } -    if (scb == NULL) -    { -      printk(WARN_LEAD "Referenced SCB %d not valid during SELTO.\n", -             p->host_no, -1, -1, -1, scb_index); -      printk(KERN_WARNING "        SCSISEQ = 0x%x SEQADDR = 0x%x SSTAT0 = 0x%x " -             "SSTAT1 = 0x%x\n", aic_inb(p, SCSISEQ), -             aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), -             aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); -      if (aic7xxx_panic_on_abort) -        aic7xxx_panic_abort(p, NULL); -    } -    else -    { -      cmd = scb->cmd; -      cmd->result = (DID_TIME_OUT << 16); - -      /* -       * Clear out this hardware SCB -       */ -      aic_outb(p, 0, SCB_CONTROL); - -      /* -       * Clear out a few values in the card that are in an undetermined -       * state. -       */ -      aic_outb(p, MSG_NOOP, MSG_OUT); - -      /* -       * Shift the waiting for selection queue forward -       */ -      nextscb = aic_inb(p, SCB_NEXT); -      aic_outb(p, nextscb, WAITING_SCBH); - -      /* -       * Put this SCB back on the free list. -       */ -      aic7xxx_add_curscb_to_free_list(p); -#ifdef AIC7XXX_VERBOSE_DEBUGGING -      if (aic7xxx_verbose > 0xffff) -        printk(INFO_LEAD "Selection Timeout.\n", p->host_no, CTL_OF_SCB(scb)); -#endif -      if (scb->flags & SCB_QUEUED_ABORT) -      { -        /* -         * We know that this particular SCB had to be the queued abort since -         * the disconnected SCB would have gotten a reconnect instead. -         * What we need to do then is to let the command timeout again so -         * we get a reset since this abort just failed. -         */ -        cmd->result = 0; -        scb = NULL; -      } -    } -    /* -     * Keep the sequencer from trying to restart any selections -     */ -    aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ); -    /* -     * Make sure the data bits on the bus are released -     * Don't do this on 7770 chipsets, it makes them give us -     * a BRKADDRINT and kills the card. -     */ -    if( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI ) -      aic_outb(p, 0, SCSIBUSL); - -    /* -     * Delay for the selection timeout delay period then stop the selection -     */ -    udelay(301); -    aic_outb(p, CLRSELINGO, CLRSINT0); -    /* -     * Clear out all the interrupt status bits -     */ -    aic_outb(p, aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1); -    p->flags &= ~AHC_HANDLING_REQINITS; -    aic_outb(p, CLRSELTIMEO | CLRBUSFREE, CLRSINT1); -    aic_outb(p, CLRSCSIINT, CLRINT); -    /* -     * Restarting the sequencer will stop the selection and make sure devices -     * are allowed to reselect in. 
-     */ -    restart_sequencer(p); -    unpause_sequencer(p, TRUE); -  } -  else if (scb == NULL) -  { -    printk(WARN_LEAD "aic7xxx_isr - referenced scb not valid " -           "during scsiint 0x%x scb(%d)\n" -           "      SIMODE0 0x%x, SIMODE1 0x%x, SSTAT0 0x%x, SEQADDR 0x%x\n", -           p->host_no, -1, -1, -1, status, scb_index, aic_inb(p, SIMODE0), -           aic_inb(p, SIMODE1), aic_inb(p, SSTAT0), -           (aic_inb(p, SEQADDR1) << 8) | aic_inb(p, SEQADDR0)); -    /* -     * Turn off the interrupt and set status to zero, so that it -     * falls through the rest of the SCSIINT code. -     */ -    aic_outb(p, status, CLRSINT1); -    aic_outb(p, CLRSCSIINT, CLRINT); -    unpause_sequencer(p, /* unpause always */ TRUE); -    scb = NULL; -  } -  else if (status & SCSIPERR) -  { -    /* -     * Determine the bus phase and queue an appropriate message. -     */ -	char  *phase; -	struct scsi_cmnd *cmd; -	unsigned char mesg_out = MSG_NOOP; -	unsigned char lastphase = aic_inb(p, LASTPHASE); -	unsigned char sstat2 = aic_inb(p, SSTAT2); - -    cmd = scb->cmd; -    switch (lastphase) -    { -      case P_DATAOUT: -        phase = "Data-Out"; -        break; -      case P_DATAIN: -        phase = "Data-In"; -        mesg_out = MSG_INITIATOR_DET_ERR; -        break; -      case P_COMMAND: -        phase = "Command"; -        break; -      case P_MESGOUT: -        phase = "Message-Out"; -        break; -      case P_STATUS: -        phase = "Status"; -        mesg_out = MSG_INITIATOR_DET_ERR; -        break; -      case P_MESGIN: -        phase = "Message-In"; -        mesg_out = MSG_PARITY_ERROR; -        break; -      default: -        phase = "unknown"; -        break; -    } - -    /* -     * A parity error has occurred during a data -     * transfer phase. Flag it and continue. -     */ -    if( (p->features & AHC_ULTRA3) &&  -        (aic_inb(p, SCSIRATE) & AHC_SYNCRATE_CRC) && -        (lastphase == P_DATAIN) ) -    { -      printk(WARN_LEAD "CRC error during %s phase.\n", -             p->host_no, CTL_OF_SCB(scb), phase); -      if(sstat2 & CRCVALERR) -      { -        printk(WARN_LEAD "  CRC error in intermediate CRC packet.\n", -               p->host_no, CTL_OF_SCB(scb)); -      } -      if(sstat2 & CRCENDERR) -      { -        printk(WARN_LEAD "  CRC error in ending CRC packet.\n", -               p->host_no, CTL_OF_SCB(scb)); -      } -      if(sstat2 & CRCREQERR) -      { -        printk(WARN_LEAD "  Target incorrectly requested a CRC packet.\n", -               p->host_no, CTL_OF_SCB(scb)); -      } -      if(sstat2 & DUAL_EDGE_ERROR) -      { -        printk(WARN_LEAD "  Dual Edge transmission error.\n", -               p->host_no, CTL_OF_SCB(scb)); -      } -    } -    else if( (lastphase == P_MESGOUT) && -             (scb->flags & SCB_MSGOUT_PPR) ) -    { -      /* -       * As per the draft specs, any device capable of supporting any of -       * the option values other than 0 are not allowed to reject the -       * PPR message.  Instead, they must negotiate out what they do -       * support instead of rejecting our offering or else they cause -       * a parity error during msg_out phase to signal that they don't -       * like our settings. 
-       */ -      aic_dev = AIC_DEV(scb->cmd); -      aic_dev->needppr = aic_dev->needppr_copy = 0; -      aic7xxx_set_width(p, scb->cmd->device->id, scb->cmd->device->channel, scb->cmd->device->lun, -                        MSG_EXT_WDTR_BUS_8_BIT, -                        (AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE), -			aic_dev); -      aic7xxx_set_syncrate(p, NULL, scb->cmd->device->id, scb->cmd->device->channel, 0, 0, -                           0, AHC_TRANS_ACTIVE|AHC_TRANS_CUR|AHC_TRANS_QUITE, -			   aic_dev); -      aic_dev->goal.options = 0; -      scb->flags &= ~SCB_MSGOUT_BITS; -      if(aic7xxx_verbose & VERBOSE_NEGOTIATION2) -      { -        printk(INFO_LEAD "parity error during PPR message, reverting " -               "to WDTR/SDTR\n", p->host_no, CTL_OF_SCB(scb)); -      } -      if ( aic_dev->goal.width ) -      { -        aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; -      } -      if ( aic_dev->goal.offset ) -      { -        if( aic_dev->goal.period <= 9 ) -        { -          aic_dev->goal.period = 10; -        } -        aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; -      } -      scb = NULL; -    } - -    /* -     * We've set the hardware to assert ATN if we get a parity -     * error on "in" phases, so all we need to do is stuff the -     * message buffer with the appropriate message.  "In" phases -     * have set mesg_out to something other than MSG_NOP. -     */ -    if (mesg_out != MSG_NOOP) -    { -      aic_outb(p, mesg_out, MSG_OUT); -      aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); -      scb = NULL; -    } -    aic_outb(p, CLRSCSIPERR, CLRSINT1); -    aic_outb(p, CLRSCSIINT, CLRINT); -    unpause_sequencer(p, /* unpause_always */ TRUE); -  } -  else if ( (status & REQINIT) && -            (p->flags & AHC_HANDLING_REQINITS) ) -  { -#ifdef AIC7XXX_VERBOSE_DEBUGGING -    if (aic7xxx_verbose > 0xffff) -      printk(INFO_LEAD "Handling REQINIT, SSTAT1=0x%x.\n", p->host_no, -             CTL_OF_SCB(scb), aic_inb(p, SSTAT1)); -#endif -    aic7xxx_handle_reqinit(p, scb); -    return; -  } -  else -  { -    /* -     * We don't know what's going on. Turn off the -     * interrupt source and try to continue. -     */ -    if (aic7xxx_verbose & VERBOSE_SCSIINT) -      printk(INFO_LEAD "Unknown SCSIINT status, SSTAT1(0x%x).\n", -        p->host_no, -1, -1, -1, status); -    aic_outb(p, status, CLRSINT1); -    aic_outb(p, CLRSCSIINT, CLRINT); -    unpause_sequencer(p, /* unpause always */ TRUE); -    scb = NULL; -  } -  if (scb != NULL) -  { -    aic7xxx_done(p, scb); -  } -} - -#ifdef AIC7XXX_VERBOSE_DEBUGGING -static void -aic7xxx_check_scbs(struct aic7xxx_host *p, char *buffer) -{ -  unsigned char saved_scbptr, free_scbh, dis_scbh, wait_scbh, temp; -  int i, bogus, lost; -  static unsigned char scb_status[AIC7XXX_MAXSCB]; - -#define SCB_NO_LIST 0 -#define SCB_FREE_LIST 1 -#define SCB_WAITING_LIST 2 -#define SCB_DISCONNECTED_LIST 4 -#define SCB_CURRENTLY_ACTIVE 8 - -  /* -   * Note, these checks will fail on a regular basis once the machine moves -   * beyond the bus scan phase.  The problem is race conditions concerning -   * the scbs and where they are linked in.  When you have 30 or so commands -   * outstanding on the bus, and run this twice with every interrupt, the -   * chances get pretty good that you'll catch the sequencer with an SCB -   * only partially linked in.  Therefore, once we pass the scan phase -   * of the bus, we really should disable this function. 
-   */ -  bogus = FALSE; -  memset(&scb_status[0], 0, sizeof(scb_status)); -  pause_sequencer(p); -  saved_scbptr = aic_inb(p, SCBPTR); -  if (saved_scbptr >= p->scb_data->maxhscbs) -  { -    printk("Bogus SCBPTR %d\n", saved_scbptr); -    bogus = TRUE; -  } -  scb_status[saved_scbptr] = SCB_CURRENTLY_ACTIVE; -  free_scbh = aic_inb(p, FREE_SCBH); -  if ( (free_scbh != SCB_LIST_NULL) && -       (free_scbh >= p->scb_data->maxhscbs) ) -  { -    printk("Bogus FREE_SCBH %d\n", free_scbh); -    bogus = TRUE; -  } -  else -  { -    temp = free_scbh; -    while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) ) -    { -      if(scb_status[temp] & 0x07) -      { -        printk("HSCB %d on multiple lists, status 0x%02x", temp, -               scb_status[temp] | SCB_FREE_LIST); -        bogus = TRUE; -      } -      scb_status[temp] |= SCB_FREE_LIST; -      aic_outb(p, temp, SCBPTR); -      temp = aic_inb(p, SCB_NEXT); -    } -  } - -  dis_scbh = aic_inb(p, DISCONNECTED_SCBH); -  if ( (dis_scbh != SCB_LIST_NULL) && -       (dis_scbh >= p->scb_data->maxhscbs) ) -  { -    printk("Bogus DISCONNECTED_SCBH %d\n", dis_scbh); -    bogus = TRUE; -  } -  else -  { -    temp = dis_scbh; -    while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) ) -    { -      if(scb_status[temp] & 0x07) -      { -        printk("HSCB %d on multiple lists, status 0x%02x", temp, -               scb_status[temp] | SCB_DISCONNECTED_LIST); -        bogus = TRUE; -      } -      scb_status[temp] |= SCB_DISCONNECTED_LIST; -      aic_outb(p, temp, SCBPTR); -      temp = aic_inb(p, SCB_NEXT); -    } -  } -   -  wait_scbh = aic_inb(p, WAITING_SCBH); -  if ( (wait_scbh != SCB_LIST_NULL) && -       (wait_scbh >= p->scb_data->maxhscbs) ) -  { -    printk("Bogus WAITING_SCBH %d\n", wait_scbh); -    bogus = TRUE; -  } -  else -  { -    temp = wait_scbh; -    while( (temp != SCB_LIST_NULL) && (temp < p->scb_data->maxhscbs) ) -    { -      if(scb_status[temp] & 0x07) -      { -        printk("HSCB %d on multiple lists, status 0x%02x", temp, -               scb_status[temp] | SCB_WAITING_LIST); -        bogus = TRUE; -      } -      scb_status[temp] |= SCB_WAITING_LIST; -      aic_outb(p, temp, SCBPTR); -      temp = aic_inb(p, SCB_NEXT); -    } -  } - -  lost=0; -  for(i=0; i < p->scb_data->maxhscbs; i++) -  { -    aic_outb(p, i, SCBPTR); -    temp = aic_inb(p, SCB_NEXT); -    if ( ((temp != SCB_LIST_NULL) && -          (temp >= p->scb_data->maxhscbs)) ) -    { -      printk("HSCB %d bad, SCB_NEXT invalid(%d).\n", i, temp); -      bogus = TRUE; -    } -    if ( temp == i ) -    { -      printk("HSCB %d bad, SCB_NEXT points to self.\n", i); -      bogus = TRUE; -    } -    if (scb_status[i] == 0) -      lost++; -    if (lost > 1) -    { -      printk("Too many lost scbs.\n"); -      bogus=TRUE; -    } -  } -  aic_outb(p, saved_scbptr, SCBPTR); -  unpause_sequencer(p, FALSE); -  if (bogus) -  { -    printk("Bogus parameters found in card SCB array structures.\n"); -    printk("%s\n", buffer); -    aic7xxx_panic_abort(p, NULL); -  } -  return; -} -#endif - - -/*+F************************************************************************* - * Function: - *   aic7xxx_handle_command_completion_intr - * - * Description: - *   SCSI command completion interrupt handler. 
- *-F*************************************************************************/ -static void -aic7xxx_handle_command_completion_intr(struct aic7xxx_host *p) -{ -	struct aic7xxx_scb *scb = NULL; -	struct aic_dev_data *aic_dev; -	struct scsi_cmnd *cmd; -	unsigned char scb_index, tindex; - -#ifdef AIC7XXX_VERBOSE_DEBUGGING -  if( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) ) -    printk(INFO_LEAD "Command Complete Int.\n", p->host_no, -1, -1, -1); -#endif -     -  /* -   * Read the INTSTAT location after clearing the CMDINT bit.  This forces -   * any posted PCI writes to flush to memory.  Gerard Roudier suggested -   * this fix to the possible race of clearing the CMDINT bit but not -   * having all command bytes flushed onto the qoutfifo. -   */ -  aic_outb(p, CLRCMDINT, CLRINT); -  aic_inb(p, INTSTAT); -  /* -   * The sequencer will continue running when it -   * issues this interrupt. There may be >1 commands -   * finished, so loop until we've processed them all. -   */ - -  while (p->qoutfifo[p->qoutfifonext] != SCB_LIST_NULL) -  { -    scb_index = p->qoutfifo[p->qoutfifonext]; -    p->qoutfifo[p->qoutfifonext++] = SCB_LIST_NULL; -    if ( scb_index >= p->scb_data->numscbs ) -    { -      printk(WARN_LEAD "CMDCMPLT with invalid SCB index %d\n", p->host_no, -        -1, -1, -1, scb_index); -      continue; -    } -    scb = p->scb_data->scb_array[scb_index]; -    if (!(scb->flags & SCB_ACTIVE) || (scb->cmd == NULL)) -    { -      printk(WARN_LEAD "CMDCMPLT without command for SCB %d, SCB flags " -        "0x%x, cmd 0x%lx\n", p->host_no, -1, -1, -1, scb_index, scb->flags, -        (unsigned long) scb->cmd); -      continue; -    } -    tindex = TARGET_INDEX(scb->cmd); -    aic_dev = AIC_DEV(scb->cmd); -    if (scb->flags & SCB_QUEUED_ABORT) -    { -      pause_sequencer(p); -      if ( ((aic_inb(p, LASTPHASE) & PHASE_MASK) != P_BUSFREE) && -           (aic_inb(p, SCB_TAG) == scb->hscb->tag) ) -      { -        unpause_sequencer(p, FALSE); -        continue; -      } -      aic7xxx_reset_device(p, scb->cmd->device->id, scb->cmd->device->channel, -        scb->cmd->device->lun, scb->hscb->tag); -      scb->flags &= ~(SCB_QUEUED_FOR_DONE | SCB_RESET | SCB_ABORT | -        SCB_QUEUED_ABORT); -      unpause_sequencer(p, FALSE); -    } -    else if (scb->flags & SCB_ABORT) -    { -      /* -       * We started to abort this, but it completed on us, let it -       * through as successful -       */ -      scb->flags &= ~(SCB_ABORT|SCB_RESET); -    } -    else if (scb->flags & SCB_SENSE) -    { -      char *buffer = &scb->cmd->sense_buffer[0]; - -      if (buffer[12] == 0x47 || buffer[12] == 0x54) -      { -        /* -         * Signal that we need to re-negotiate things. -         */ -        aic_dev->needppr = aic_dev->needppr_copy; -        aic_dev->needsdtr = aic_dev->needsdtr_copy; -        aic_dev->needwdtr = aic_dev->needwdtr_copy; -      } -    } -    cmd = scb->cmd; -    if (scb->hscb->residual_SG_segment_count != 0) -    { -      aic7xxx_calculate_residual(p, scb); -    } -    cmd->result |= (aic7xxx_error(cmd) << 16); -    aic7xxx_done(p, scb); -  } -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_isr - * - * Description: - *   SCSI controller interrupt handler. - *-F*************************************************************************/ -static void -aic7xxx_isr(void *dev_id) -{ -  struct aic7xxx_host *p; -  unsigned char intstat; - -  p = dev_id; - -  /* -   * Just a few sanity checks.  
Make sure that we have an int pending. -   * Also, if PCI, then we are going to check for a PCI bus error status -   * should we get too many spurious interrupts. -   */ -  if (!((intstat = aic_inb(p, INTSTAT)) & INT_PEND)) -  { -#ifdef CONFIG_PCI -    if ( (p->chip & AHC_PCI) && (p->spurious_int > 500) && -        !(p->flags & AHC_HANDLING_REQINITS) ) -    { -      if ( aic_inb(p, ERROR) & PCIERRSTAT ) -      { -        aic7xxx_pci_intr(p); -      } -      p->spurious_int = 0; -    } -    else if ( !(p->flags & AHC_HANDLING_REQINITS) ) -    { -      p->spurious_int++; -    } -#endif -    return; -  } - -  p->spurious_int = 0; - -  /* -   * Keep track of interrupts for /proc/scsi -   */ -  p->isr_count++; - -#ifdef AIC7XXX_VERBOSE_DEBUGGING -  if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) && -       (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) ) -    aic7xxx_check_scbs(p, "Bogus settings at start of interrupt."); -#endif - -  /* -   * Handle all the interrupt sources - especially for SCSI -   * interrupts, we won't get a second chance at them. -   */ -  if (intstat & CMDCMPLT) -  { -    aic7xxx_handle_command_completion_intr(p); -  } - -  if (intstat & BRKADRINT) -  { -    int i; -    unsigned char errno = aic_inb(p, ERROR); - -    printk(KERN_ERR "(scsi%d) BRKADRINT error(0x%x):\n", p->host_no, errno); -    for (i = 0; i < ARRAY_SIZE(hard_error); i++) -    { -      if (errno & hard_error[i].errno) -      { -        printk(KERN_ERR "  %s\n", hard_error[i].errmesg); -      } -    } -    printk(KERN_ERR "(scsi%d)   SEQADDR=0x%x\n", p->host_no, -      (((aic_inb(p, SEQADDR1) << 8) & 0x100) | aic_inb(p, SEQADDR0))); -    if (aic7xxx_panic_on_abort) -      aic7xxx_panic_abort(p, NULL); -#ifdef CONFIG_PCI -    if (errno & PCIERRSTAT) -      aic7xxx_pci_intr(p); -#endif -    if (errno & (SQPARERR | ILLOPCODE | ILLSADDR)) -    { -      panic("aic7xxx: unrecoverable BRKADRINT.\n"); -    } -    if (errno & ILLHADDR) -    { -      printk(KERN_ERR "(scsi%d) BUG! Driver accessed chip without first " -             "pausing controller!\n", p->host_no); -    } -#ifdef AIC7XXX_VERBOSE_DEBUGGING -    if (errno & DPARERR) -    { -      if (aic_inb(p, DMAPARAMS) & DIRECTION) -        printk("(scsi%d) while DMAing SCB from host to card.\n", p->host_no); -      else -        printk("(scsi%d) while DMAing SCB from card to host.\n", p->host_no); -    } -#endif -    aic_outb(p, CLRPARERR | CLRBRKADRINT, CLRINT); -    unpause_sequencer(p, FALSE); -  } - -  if (intstat & SEQINT) -  { -    /* -     * Read the CCSCBCTL register to work around a bug in the Ultra2 cards -     */ -    if(p->features & AHC_ULTRA2) -    { -      aic_inb(p, CCSCBCTL); -    } -    aic7xxx_handle_seqint(p, intstat); -  } - -  if (intstat & SCSIINT) -  { -    aic7xxx_handle_scsiint(p, intstat); -  } - -#ifdef AIC7XXX_VERBOSE_DEBUGGING -  if ( (p->isr_count < 16) && (aic7xxx_verbose > 0xffff) && -       (aic7xxx_panic_on_abort) && (p->flags & AHC_PAGESCBS) ) -    aic7xxx_check_scbs(p, "Bogus settings at end of interrupt."); -#endif - -} - -/*+F************************************************************************* - * Function: - *   do_aic7xxx_isr - * - * Description: - *   This is a gross hack to solve a problem in linux kernels 2.1.85 and - *   above.  
Please, children, do not try this at home, and if you ever see - *   anything like it, please inform the Gross Hack Police immediately - *-F*************************************************************************/ -static irqreturn_t -do_aic7xxx_isr(int irq, void *dev_id) -{ -  unsigned long cpu_flags; -  struct aic7xxx_host *p; -   -  p = dev_id; -  if(!p) -    return IRQ_NONE; -  spin_lock_irqsave(p->host->host_lock, cpu_flags); -  p->flags |= AHC_IN_ISR; -  do -  { -    aic7xxx_isr(dev_id); -  } while ( (aic_inb(p, INTSTAT) & INT_PEND) ); -  aic7xxx_done_cmds_complete(p); -  aic7xxx_run_waiting_queues(p); -  p->flags &= ~AHC_IN_ISR; -  spin_unlock_irqrestore(p->host->host_lock, cpu_flags); - -  return IRQ_HANDLED; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_init_transinfo - * - * Description: - *   Set up the initial aic_dev values from the BIOS settings and from - *   INQUIRY results - *-F*************************************************************************/ -static void -aic7xxx_init_transinfo(struct aic7xxx_host *p, struct aic_dev_data *aic_dev) -{ -  struct scsi_device *sdpnt = aic_dev->SDptr; -  unsigned char tindex; - -  tindex = sdpnt->id | (sdpnt->channel << 3); -  if (!(aic_dev->flags & DEVICE_DTR_SCANNED)) -  { -    aic_dev->flags |= DEVICE_DTR_SCANNED; - -    if ( sdpnt->wdtr && (p->features & AHC_WIDE) ) -    { -      aic_dev->needwdtr = aic_dev->needwdtr_copy = 1; -      aic_dev->goal.width = p->user[tindex].width; -    } -    else -    { -      aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; -      pause_sequencer(p); -      aic7xxx_set_width(p, sdpnt->id, sdpnt->channel, sdpnt->lun, -                        MSG_EXT_WDTR_BUS_8_BIT, (AHC_TRANS_ACTIVE | -                                                 AHC_TRANS_GOAL | -                                                 AHC_TRANS_CUR), aic_dev ); -      unpause_sequencer(p, FALSE); -    } -    if ( sdpnt->sdtr && p->user[tindex].offset ) -    { -      aic_dev->goal.period = p->user[tindex].period; -      aic_dev->goal.options = p->user[tindex].options; -      if (p->features & AHC_ULTRA2) -        aic_dev->goal.offset = MAX_OFFSET_ULTRA2; -      else if (aic_dev->goal.width == MSG_EXT_WDTR_BUS_16_BIT) -        aic_dev->goal.offset = MAX_OFFSET_16BIT; -      else -        aic_dev->goal.offset = MAX_OFFSET_8BIT; -      if ( sdpnt->ppr && p->user[tindex].period <= 9 && -             p->user[tindex].options ) -      { -        aic_dev->needppr = aic_dev->needppr_copy = 1; -        aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; -        aic_dev->needwdtr = aic_dev->needwdtr_copy = 0; -        aic_dev->flags |= DEVICE_SCSI_3; -      } -      else -      { -        aic_dev->needsdtr = aic_dev->needsdtr_copy = 1; -        aic_dev->goal.period = max_t(unsigned char, 10, aic_dev->goal.period); -        aic_dev->goal.options = 0; -      } -    } -    else -    { -      aic_dev->needsdtr = aic_dev->needsdtr_copy = 0; -      aic_dev->goal.period = 255; -      aic_dev->goal.offset = 0; -      aic_dev->goal.options = 0; -    } -    aic_dev->flags |= DEVICE_PRINT_DTR; -  } -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_slave_alloc - * - * Description: - *   Set up the initial aic_dev struct pointers - *-F*************************************************************************/ -static int -aic7xxx_slave_alloc(struct scsi_device *SDptr) -{ -  struct aic7xxx_host *p = (struct aic7xxx_host 
*)SDptr->host->hostdata;
-  struct aic_dev_data *aic_dev;
-
-  aic_dev = kmalloc(sizeof(struct aic_dev_data), GFP_KERNEL);
-  if(!aic_dev)
-    return 1;
-  /*
-   * Check to see if channel was scanned.
-   */
-   
-  if (!(p->flags & AHC_A_SCANNED) && (SDptr->channel == 0))
-  {
-    if (aic7xxx_verbose & VERBOSE_PROBE2)
-      printk(INFO_LEAD "Scanning channel for devices.\n",
-        p->host_no, 0, -1, -1);
-    p->flags |= AHC_A_SCANNED;
-  }
-  else
-  {
-    if (!(p->flags & AHC_B_SCANNED) && (SDptr->channel == 1))
-    {
-      if (aic7xxx_verbose & VERBOSE_PROBE2)
-        printk(INFO_LEAD "Scanning channel for devices.\n",
-          p->host_no, 1, -1, -1);
-      p->flags |= AHC_B_SCANNED;
-    }
-  }
-
-  memset(aic_dev, 0, sizeof(struct aic_dev_data));
-  SDptr->hostdata = aic_dev;
-  aic_dev->SDptr = SDptr;
-  aic_dev->max_q_depth = 1;
-  aic_dev->temp_q_depth = 1;
-  scbq_init(&aic_dev->delayed_scbs);
-  INIT_LIST_HEAD(&aic_dev->list);
-  list_add_tail(&aic_dev->list, &p->aic_devs);
-  return 0;
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic7xxx_device_queue_depth
- *
- * Description:
- *   Determines the queue depth for a given device.  There are two ways
- *   a queue depth can be obtained for a tagged queueing device.  One
- *   way is the default queue depth, which is determined by
- *   aic7xxx_default_queue_depth.  The other is by the aic7xxx_tag_info
- *   array.
- *
- *   If tagged queueing isn't supported on the device, then we set the
- *   depth to p->host->hostt->cmd_per_lun for internal driver queueing.
- *   Otherwise, we use either 4 or 8 as the default queue depth
- *   (dependent on the number of hardware SCBs).
- *   The other way we determine queue depth is through the use of the
- *   aic7xxx_tag_info array which is enabled by defining
- *   AIC7XXX_TAGGED_QUEUEING_BY_DEVICE.  This array can be initialized
- *   with queue depths for individual devices.  It also allows tagged
- *   queueing to be [en|dis]abled for a specific adapter.
- *-F*************************************************************************/ -static void -aic7xxx_device_queue_depth(struct aic7xxx_host *p, struct scsi_device *device) -{ -  int tag_enabled = FALSE; -  struct aic_dev_data *aic_dev = device->hostdata; -  unsigned char tindex; - -  tindex = device->id | (device->channel << 3); - -  if (device->simple_tags) -    return; // We've already enabled this device - -  if (device->tagged_supported) -  { -    tag_enabled = TRUE; - -    if (!(p->discenable & (1 << tindex))) -    { -      if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) -        printk(INFO_LEAD "Disconnection disabled, unable to " -             "enable tagged queueing.\n", -             p->host_no, device->channel, device->id, device->lun); -      tag_enabled = FALSE; -    } -    else -    { -      if (p->instance >= ARRAY_SIZE(aic7xxx_tag_info)) -      { -        static int print_warning = TRUE; -        if(print_warning) -        { -          printk(KERN_INFO "aic7xxx: WARNING, insufficient tag_info instances for" -                           " installed controllers.\n"); -          printk(KERN_INFO "aic7xxx: Please update the aic7xxx_tag_info array in" -                           " the aic7xxx.c source file.\n"); -          print_warning = FALSE; -        } -        aic_dev->max_q_depth = aic_dev->temp_q_depth = -		aic7xxx_default_queue_depth; -      } -      else -      { - -        if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 255) -        { -          tag_enabled = FALSE; -        } -        else if (aic7xxx_tag_info[p->instance].tag_commands[tindex] == 0) -        { -          aic_dev->max_q_depth = aic_dev->temp_q_depth = -		  aic7xxx_default_queue_depth; -        } -        else -        { -          aic_dev->max_q_depth = aic_dev->temp_q_depth =  -            aic7xxx_tag_info[p->instance].tag_commands[tindex]; -        } -      } -    } -  } -  if (tag_enabled) -  { -    if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) -    { -          printk(INFO_LEAD "Tagged queuing enabled, queue depth %d.\n", -            p->host_no, device->channel, device->id, -            device->lun, aic_dev->max_q_depth); -    } -    scsi_adjust_queue_depth(device, MSG_ORDERED_TAG, aic_dev->max_q_depth); -  } -  else -  { -    if (aic7xxx_verbose & VERBOSE_NEGOTIATION2) -    { -          printk(INFO_LEAD "Tagged queuing disabled, queue depth %d.\n", -            p->host_no, device->channel, device->id, -            device->lun, device->host->cmd_per_lun); -    } -    scsi_adjust_queue_depth(device, 0, device->host->cmd_per_lun); -  } -  return; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_slave_destroy - * - * Description: - *   prepare for this device to go away - *-F*************************************************************************/ -static void -aic7xxx_slave_destroy(struct scsi_device *SDptr) -{ -  struct aic_dev_data *aic_dev = SDptr->hostdata; - -  list_del(&aic_dev->list); -  SDptr->hostdata = NULL; -  kfree(aic_dev); -  return; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_slave_configure - * - * Description: - *   Configure the device we are attaching to the controller.  This is - *   where we get to do things like scan the INQUIRY data, set queue - *   depths, allocate command structs, etc. 
- *-F*************************************************************************/ -static int -aic7xxx_slave_configure(struct scsi_device *SDptr) -{ -  struct aic7xxx_host *p = (struct aic7xxx_host *) SDptr->host->hostdata; -  struct aic_dev_data *aic_dev; -  int scbnum; - -  aic_dev = (struct aic_dev_data *)SDptr->hostdata; - -  aic7xxx_init_transinfo(p, aic_dev); -  aic7xxx_device_queue_depth(p, SDptr); -  if(list_empty(&aic_dev->list)) -    list_add_tail(&aic_dev->list, &p->aic_devs); - -  scbnum = 0; -  list_for_each_entry(aic_dev, &p->aic_devs, list) { -    scbnum += aic_dev->max_q_depth; -  } -  while (scbnum > p->scb_data->numscbs) -  { -    /* -     * Pre-allocate the needed SCBs to get around the possibility of having -     * to allocate some when memory is more or less exhausted and we need -     * the SCB in order to perform a swap operation (possible deadlock) -     */ -    if ( aic7xxx_allocate_scb(p) == 0 ) -      break; -  } - - -  return(0); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_probe - * - * Description: - *   Probing for EISA boards: it looks like the first two bytes - *   are a manufacturer code - three characters, five bits each: - * - *               BYTE 0   BYTE 1   BYTE 2   BYTE 3 - *              ?1111122 22233333 PPPPPPPP RRRRRRRR - * - *   The characters are baselined off ASCII '@', so add that value - *   to each to get the real ASCII code for it. The next two bytes - *   appear to be a product and revision number, probably vendor- - *   specific. This is what is being searched for at each port, - *   and what should probably correspond to the ID= field in the - *   ECU's .cfg file for the card - if your card is not detected, - *   make sure your signature is listed in the array. - * - *   The fourth byte's lowest bit seems to be an enabled/disabled - *   flag (rest of the bits are reserved?). - * - * NOTE:  This function is only needed on Intel and Alpha platforms, - *   the other platforms we support don't have EISA/VLB busses.  So, - *   we #ifdef this entire function to avoid compiler warnings about - *   an unused function. - *-F*************************************************************************/ -#if defined(__i386__) || defined(__alpha__) -static int -aic7xxx_probe(int slot, int base, ahc_flag_type *flags) -{ -  int i; -  unsigned char buf[4]; - -  static struct { -    int n; -    unsigned char signature[sizeof(buf)]; -    ahc_chip type; -    int bios_disabled; -  } AIC7xxx[] = { -    { 4, { 0x04, 0x90, 0x77, 0x70 }, -      AHC_AIC7770|AHC_EISA, FALSE },  /* mb 7770  */ -    { 4, { 0x04, 0x90, 0x77, 0x71 }, -      AHC_AIC7770|AHC_EISA, FALSE }, /* host adapter 274x */ -    { 4, { 0x04, 0x90, 0x77, 0x56 }, -      AHC_AIC7770|AHC_VL, FALSE }, /* 284x BIOS enabled */ -    { 4, { 0x04, 0x90, 0x77, 0x57 }, -      AHC_AIC7770|AHC_VL, TRUE }   /* 284x BIOS disabled */ -  }; - -  /* -   * The VL-bus cards need to be primed by -   * writing before a signature check. -   */ -  for (i = 0; i < sizeof(buf); i++) -  { -    outb(0x80 + i, base); -    buf[i] = inb(base + i); -  } - -  for (i = 0; i < ARRAY_SIZE(AIC7xxx); i++) -  { -    /* -     * Signature match on enabled card? 
-     */ -    if (!memcmp(buf, AIC7xxx[i].signature, AIC7xxx[i].n)) -    { -      if (inb(base + 4) & 1) -      { -        if (AIC7xxx[i].bios_disabled) -        { -          *flags |= AHC_USEDEFAULTS; -        } -        else -        { -          *flags |= AHC_BIOS_ENABLED; -        } -        return (i); -      } - -      printk("aic7xxx: <Adaptec 7770 SCSI Host Adapter> " -             "disabled at slot %d, ignored.\n", slot); -    } -  } - -  return (-1); -} -#endif /* (__i386__) || (__alpha__) */ - - -/*+F************************************************************************* - * Function: - *   read_2840_seeprom - * - * Description: - *   Reads the 2840 serial EEPROM and returns 1 if successful and 0 if - *   not successful. - * - *   See read_seeprom (for the 2940) for the instruction set of the 93C46 - *   chip. - * - *   The 2840 interface to the 93C46 serial EEPROM is through the - *   STATUS_2840 and SEECTL_2840 registers.  The CS_2840, CK_2840, and - *   DO_2840 bits of the SEECTL_2840 register are connected to the chip - *   select, clock, and data out lines respectively of the serial EEPROM. - *   The DI_2840 bit of the STATUS_2840 is connected to the data in line - *   of the serial EEPROM.  The EEPROM_TF bit of STATUS_2840 register is - *   useful in that it gives us an 800 nsec timer.  After a read from the - *   SEECTL_2840 register the timing flag is cleared and goes high 800 nsec - *   later. - *-F*************************************************************************/ -static int -read_284x_seeprom(struct aic7xxx_host *p, struct seeprom_config *sc) -{ -  int i = 0, k = 0; -  unsigned char temp; -  unsigned short checksum = 0; -  unsigned short *seeprom = (unsigned short *) sc; -  struct seeprom_cmd { -    unsigned char len; -    unsigned char bits[3]; -  }; -  struct seeprom_cmd seeprom_read = {3, {1, 1, 0}}; - -#define CLOCK_PULSE(p) \ -  while ((aic_inb(p, STATUS_2840) & EEPROM_TF) == 0)        \ -  {                                                \ -    ;  /* Do nothing */                                \ -  }                                                \ -  (void) aic_inb(p, SEECTL_2840); - -  /* -   * Read the first 32 registers of the seeprom.  For the 2840, -   * the 93C46 SEEPROM is a 1024-bit device with 64 16-bit registers -   * but only the first 32 are used by Adaptec BIOS.  The loop -   * will range from 0 to 31. -   */ -  for (k = 0; k < (sizeof(*sc) / 2); k++) -  { -    /* -     * Send chip select for one clock cycle. -     */ -    aic_outb(p, CK_2840 | CS_2840, SEECTL_2840); -    CLOCK_PULSE(p); - -    /* -     * Now we're ready to send the read command followed by the -     * address of the 16-bit register we want to read. -     */ -    for (i = 0; i < seeprom_read.len; i++) -    { -      temp = CS_2840 | seeprom_read.bits[i]; -      aic_outb(p, temp, SEECTL_2840); -      CLOCK_PULSE(p); -      temp = temp ^ CK_2840; -      aic_outb(p, temp, SEECTL_2840); -      CLOCK_PULSE(p); -    } -    /* -     * Send the 6 bit address (MSB first, LSB last). -     */ -    for (i = 5; i >= 0; i--) -    { -      temp = k; -      temp = (temp >> i) & 1;  /* Mask out all but lower bit. */ -      temp = CS_2840 | temp; -      aic_outb(p, temp, SEECTL_2840); -      CLOCK_PULSE(p); -      temp = temp ^ CK_2840; -      aic_outb(p, temp, SEECTL_2840); -      CLOCK_PULSE(p); -    } - -    /* -     * Now read the 16 bit register.  An initial 0 precedes the -     * register contents which begins with bit 15 (MSB) and ends -     * with bit 0 (LSB).  
The initial 0 will be shifted off the -     * top of our word as we let the loop run from 0 to 16. -     */ -    for (i = 0; i <= 16; i++) -    { -      temp = CS_2840; -      aic_outb(p, temp, SEECTL_2840); -      CLOCK_PULSE(p); -      temp = temp ^ CK_2840; -      seeprom[k] = (seeprom[k] << 1) | (aic_inb(p, STATUS_2840) & DI_2840); -      aic_outb(p, temp, SEECTL_2840); -      CLOCK_PULSE(p); -    } -    /* -     * The serial EEPROM has a checksum in the last word.  Keep a -     * running checksum for all words read except for the last -     * word.  We'll verify the checksum after all words have been -     * read. -     */ -    if (k < (sizeof(*sc) / 2) - 1) -    { -      checksum = checksum + seeprom[k]; -    } - -    /* -     * Reset the chip select for the next command cycle. -     */ -    aic_outb(p, 0, SEECTL_2840); -    CLOCK_PULSE(p); -    aic_outb(p, CK_2840, SEECTL_2840); -    CLOCK_PULSE(p); -    aic_outb(p, 0, SEECTL_2840); -    CLOCK_PULSE(p); -  } - -#if 0 -  printk("Computed checksum 0x%x, checksum read 0x%x\n", checksum, sc->checksum); -  printk("Serial EEPROM:"); -  for (k = 0; k < (sizeof(*sc) / 2); k++) -  { -    if (((k % 8) == 0) && (k != 0)) -    { -      printk("\n              "); -    } -    printk(" 0x%x", seeprom[k]); -  } -  printk("\n"); -#endif - -  if (checksum != sc->checksum) -  { -    printk("aic7xxx: SEEPROM checksum error, ignoring SEEPROM settings.\n"); -    return (0); -  } - -  return (1); -#undef CLOCK_PULSE -} - -#define CLOCK_PULSE(p)                                               \ -  do {                                                               \ -    int limit = 0;                                                   \ -    do {                                                             \ -      mb();                                                          \ -      pause_sequencer(p);  /* This is just to generate some PCI */   \ -                           /* traffic so the PCI read is flushed */  \ -                           /* it shouldn't be needed, but some */    \ -                           /* chipsets do indeed appear to need */   \ -                           /* something to force PCI reads to get */ \ -                           /* flushed */                             \ -      udelay(1);           /* Do nothing */                          \ -    } while (((aic_inb(p, SEECTL) & SEERDY) == 0) && (++limit < 1000)); \ -  } while(0) - -/*+F************************************************************************* - * Function: - *   acquire_seeprom - * - * Description: - *   Acquires access to the memory port on PCI controllers. - *-F*************************************************************************/ -static int -acquire_seeprom(struct aic7xxx_host *p) -{ - -  /* -   * Request access of the memory port.  When access is -   * granted, SEERDY will go high.  We use a 1 second -   * timeout which should be near 1 second more than -   * is needed.  Reason: after the 7870 chip reset, there -   * should be no contention. -   */ -  aic_outb(p, SEEMS, SEECTL); -  CLOCK_PULSE(p); -  if ((aic_inb(p, SEECTL) & SEERDY) == 0) -  { -    aic_outb(p, 0, SEECTL); -    return (0); -  } -  return (1); -} - -/*+F************************************************************************* - * Function: - *   release_seeprom - * - * Description: - *   Releases access to the memory port on PCI controllers. 
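This PCI flavor of CLOCK_PULSE() is a bounded busy-wait. The same pattern reads more plainly as a function; a sketch assuming kernel context for udelay(), with the ready() predicate as a hypothetical stand-in for the SEERDY test:

  #include <linux/delay.h>

  /* Poll a ready predicate with a hard iteration bound, backing off
   * 1 usec per attempt; returns nonzero on success, 0 on timeout. */
  static int wait_ready_bounded(int (*ready)(void *), void *arg, int max_polls)
  {
    int limit = 0;

    while (!ready(arg) && ++limit < max_polls)
      udelay(1);
    return limit < max_polls;
  }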
- *-F*************************************************************************/
-static void
-release_seeprom(struct aic7xxx_host *p)
-{
-  /*
-   * Make sure the SEEPROM is ready before we release it.
-   */
-  CLOCK_PULSE(p);
-  aic_outb(p, 0, SEECTL);
-}
-
-/*+F*************************************************************************
- * Function:
- *   read_seeprom
- *
- * Description:
- *   Reads the serial EEPROM and returns 1 if successful and 0 if
- *   not successful.
- *
- *   The instruction set of the 93C46/56/66 chips is as follows:
- *
- *               Start  OP
- *     Function   Bit  Code  Address    Data     Description
- *     -------------------------------------------------------------------
- *     READ        1    10   A5 - A0             Reads data stored in memory,
- *                                               starting at specified address
- *     EWEN        1    00   11XXXX              Write enable must precede
- *                                               all programming modes
- *     ERASE       1    11   A5 - A0             Erase register A5A4A3A2A1A0
- *     WRITE       1    01   A5 - A0   D15 - D0  Writes register
- *     ERAL        1    00   10XXXX              Erase all registers
- *     WRAL        1    00   01XXXX    D15 - D0  Writes to all registers
- *     EWDS        1    00   00XXXX              Disables all programming
- *                                               instructions
- *     *Note: A value of X for address is a don't care condition.
- *     *Note: The 93C56 and 93C66 have 8 address bits.
- *
- *   The 93C46 has a four wire interface: clock, chip select, data in, and
- *   data out.  In order to perform one of the above functions, you need
- *   to enable the chip select for a clock period (typically a minimum of
- *   1 usec, with the clock high and low a minimum of 750 and 250 nsec
- *   respectively).  While the chip select remains high, you can clock in
- *   the instructions (above) starting with the start bit, followed by the
- *   OP code, Address, and Data (if needed).  For the READ instruction, the
- *   requested 16-bit register contents are read from the data out line but
- *   are preceded by an initial zero (leading 0, followed by 16 bits, MSB
- *   first).  The clock cycling from low to high initiates the next data
- *   bit to be sent from the chip.
- *
- *   The 78xx interface to the 93C46 serial EEPROM is through the SEECTL
- *   register.  After successful arbitration for the memory port, the
- *   SEECS bit of the SEECTL register is connected to the chip select.
- *   The SEECK, SEEDO, and SEEDI are connected to the clock, data out,
- *   and data in lines respectively.  The SEERDY bit of SEECTL is useful
- *   in that it gives us an 800 nsec timer.  After a write to the SEECTL
- *   register, the SEERDY goes high 800 nsec later.  The one exception
- *   to this is when we first request access to the memory port.  The
- *   SEERDY goes high to signify that access has been granted and, for
- *   this case, has no implied timing.
- *-F*************************************************************************/
-static int
-read_seeprom(struct aic7xxx_host *p, int offset,
-    unsigned short *scarray, unsigned int len, seeprom_chip_type chip)
-{
-  int i = 0, k;
-  unsigned char temp;
-  unsigned short checksum = 0;
-  struct seeprom_cmd {
-    unsigned char len;
-    unsigned char bits[3];
-  };
-  struct seeprom_cmd seeprom_read = {3, {1, 1, 0}};
-
-  /*
-   * Request access of the memory port. 
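To make the framing in the table above concrete: the READ command flattens into single bits, a start bit, the opcode 1 0, then six address bits MSB first. This is the same stream the bit-banging loops below shift out one SEECK edge at a time (build_read_cmd is an invented name for illustration):

  /* Flatten a 93C46 READ command for register 'addr' into single bits:
   * start bit 1, opcode 1 0, then A5..A0 (MSB first).  Returns the
   * number of bits filled in (9 for the 93C46's 6-bit addressing). */
  static int build_read_cmd(unsigned int addr, int bits[9])
  {
    int i, n = 0;

    bits[n++] = 1;               /* start bit */
    bits[n++] = 1;               /* opcode "10" = READ */
    bits[n++] = 0;
    for (i = 5; i >= 0; i--)     /* six address bits, MSB first */
      bits[n++] = (addr >> i) & 1;
    return n;
  }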
-   */ -  if (acquire_seeprom(p) == 0) -  { -    return (0); -  } - -  /* -   * Read 'len' registers of the seeprom.  For the 7870, the 93C46 -   * SEEPROM is a 1024-bit device with 64 16-bit registers but only -   * the first 32 are used by Adaptec BIOS.  Some adapters use the -   * 93C56 SEEPROM which is a 2048-bit device.  The loop will range -   * from 0 to 'len' - 1. -   */ -  for (k = 0; k < len; k++) -  { -    /* -     * Send chip select for one clock cycle. -     */ -    aic_outb(p, SEEMS | SEECK | SEECS, SEECTL); -    CLOCK_PULSE(p); - -    /* -     * Now we're ready to send the read command followed by the -     * address of the 16-bit register we want to read. -     */ -    for (i = 0; i < seeprom_read.len; i++) -    { -      temp = SEEMS | SEECS | (seeprom_read.bits[i] << 1); -      aic_outb(p, temp, SEECTL); -      CLOCK_PULSE(p); -      temp = temp ^ SEECK; -      aic_outb(p, temp, SEECTL); -      CLOCK_PULSE(p); -    } -    /* -     * Send the 6 or 8 bit address (MSB first, LSB last). -     */ -    for (i = ((int) chip - 1); i >= 0; i--) -    { -      temp = k + offset; -      temp = (temp >> i) & 1;  /* Mask out all but lower bit. */ -      temp = SEEMS | SEECS | (temp << 1); -      aic_outb(p, temp, SEECTL); -      CLOCK_PULSE(p); -      temp = temp ^ SEECK; -      aic_outb(p, temp, SEECTL); -      CLOCK_PULSE(p); -    } - -    /* -     * Now read the 16 bit register.  An initial 0 precedes the -     * register contents which begins with bit 15 (MSB) and ends -     * with bit 0 (LSB).  The initial 0 will be shifted off the -     * top of our word as we let the loop run from 0 to 16. -     */ -    for (i = 0; i <= 16; i++) -    { -      temp = SEEMS | SEECS; -      aic_outb(p, temp, SEECTL); -      CLOCK_PULSE(p); -      temp = temp ^ SEECK; -      scarray[k] = (scarray[k] << 1) | (aic_inb(p, SEECTL) & SEEDI); -      aic_outb(p, temp, SEECTL); -      CLOCK_PULSE(p); -    } - -    /* -     * The serial EEPROM should have a checksum in the last word. -     * Keep a running checksum for all words read except for the -     * last word.  We'll verify the checksum after all words have -     * been read. -     */ -    if (k < (len - 1)) -    { -      checksum = checksum + scarray[k]; -    } - -    /* -     * Reset the chip select for the next command cycle. -     */ -    aic_outb(p, SEEMS, SEECTL); -    CLOCK_PULSE(p); -    aic_outb(p, SEEMS | SEECK, SEECTL); -    CLOCK_PULSE(p); -    aic_outb(p, SEEMS, SEECTL); -    CLOCK_PULSE(p); -  } - -  /* -   * Release access to the memory port and the serial EEPROM. -   */ -  release_seeprom(p); - -#if 0 -  printk("Computed checksum 0x%x, checksum read 0x%x\n", -         checksum, scarray[len - 1]); -  printk("Serial EEPROM:"); -  for (k = 0; k < len; k++) -  { -    if (((k % 8) == 0) && (k != 0)) -    { -      printk("\n              "); -    } -    printk(" 0x%x", scarray[k]); -  } -  printk("\n"); -#endif -  if ( (checksum != scarray[len - 1]) || (checksum == 0) ) -  { -    return (0); -  } - -  return (1); -} - -/*+F************************************************************************* - * Function: - *   read_brdctl - * - * Description: - *   Reads the BRDCTL register. 
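The checksum convention enforced at the end of read_seeprom() above reduces to a small predicate: the 16-bit sum of all words except the last must equal the last word, and an all-zero image is rejected. As a standalone sketch (seeprom_checksum_ok is an invented helper; len is assumed to be at least 2):

  #include <stdint.h>

  /* SEEPROM checksum rule from read_seeprom(): the sum of words
   * 0..len-2 (mod 2^16) must match word len-1, and all-zero fails. */
  static int seeprom_checksum_ok(const uint16_t *words, unsigned int len)
  {
    uint16_t sum = 0;
    unsigned int k;

    for (k = 0; k < len - 1; k++)
      sum += words[k];
    return (sum == words[len - 1]) && (sum != 0);
  }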
- *-F*************************************************************************/
-static unsigned char
-read_brdctl(struct aic7xxx_host *p)
-{
-  unsigned char brdctl, value;
-
-  /*
-   * Make sure the SEEPROM is ready before we access it
-   */
-  CLOCK_PULSE(p);
-  if (p->features & AHC_ULTRA2)
-  {
-    brdctl = BRDRW_ULTRA2;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    value = aic_inb(p, BRDCTL);
-    CLOCK_PULSE(p);
-    return(value);
-  }
-  brdctl = BRDRW;
-  if ( !((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) ||
-        (p->flags & AHC_CHNLB) )
-  {
-    brdctl |= BRDCS;
-  }
-  aic_outb(p, brdctl, BRDCTL);
-  CLOCK_PULSE(p);
-  value = aic_inb(p, BRDCTL);
-  CLOCK_PULSE(p);
-  aic_outb(p, 0, BRDCTL);
-  CLOCK_PULSE(p);
-  return (value);
-}
-
-/*+F*************************************************************************
- * Function:
- *   write_brdctl
- *
- * Description:
- *   Writes a value to the BRDCTL register.
- *-F*************************************************************************/
-static void
-write_brdctl(struct aic7xxx_host *p, unsigned char value)
-{
-  unsigned char brdctl;
-
-  /*
-   * Make sure the SEEPROM is ready before we access it
-   */
-  CLOCK_PULSE(p);
-  if (p->features & AHC_ULTRA2)
-  {
-    brdctl = value;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl |= BRDSTB_ULTRA2;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl &= ~BRDSTB_ULTRA2;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    read_brdctl(p);
-    CLOCK_PULSE(p);
-  }
-  else
-  {
-    /*
-     * Drive the strobe with the board chip select asserted; the
-     * chip select is unconditional here, unlike in read_brdctl().
-     */
-    brdctl = BRDSTB | BRDCS;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl |= value;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl &= ~BRDSTB;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-    brdctl &= ~BRDCS;
-    aic_outb(p, brdctl, BRDCTL);
-    CLOCK_PULSE(p);
-  }
-}
-
-/*+F*************************************************************************
- * Function:
- *   aic785x_cable_detect
- *
- * Description:
- *   Detect the cables that are present on aic785x class controller chips
- *-F*************************************************************************/
-static void
-aic785x_cable_detect(struct aic7xxx_host *p, int *int_50,
-    int *ext_present, int *eeprom)
-{
-  unsigned char brdctl;
-
-  aic_outb(p, BRDRW | BRDCS, BRDCTL);
-  CLOCK_PULSE(p);
-  aic_outb(p, 0, BRDCTL);
-  CLOCK_PULSE(p);
-  brdctl = aic_inb(p, BRDCTL);
-  CLOCK_PULSE(p);
-  *int_50 = !(brdctl & BRDDAT5);
-  *ext_present = !(brdctl & BRDDAT6);
-  *eeprom = (aic_inb(p, SPIOCAP) & EEPROM);
-}
-
-#undef CLOCK_PULSE
-
-/*+F*************************************************************************
- * Function:
- *   aic2940_uwpro_wide_cable_detect
- *
- * Description:
- *   Detect the cables that are present on the 2940-UWPro cards
- *
- * NOTE: This function assumes the SEEPROM will have already been acquired
- *       prior to invocation of this function.
- *-F*************************************************************************/
-static void
-aic2940_uwpro_wide_cable_detect(struct aic7xxx_host *p, int *int_68,
-    int *ext_68, int *eeprom)
-{
-  unsigned char brdctl;
-
-  /*
-   * First read the status of our cables.  Set the rom bank to
-   * 0 since the bank setting serves as a multiplexor for the
-   * cable detection logic.  BRDDAT5 controls the bank switch. 
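The bank multiplexing works as follows: BRDDAT5 selects which group of sense lines read_brdctl() returns, and the active-low bits are then inverted into booleans. A sketch of the whole exchange, assuming the driver's helpers and register bit names are in scope (sense_uwpro is an invented wrapper around the logic of the function below):

  struct uwpro_cables { int int68; int ext68; int eeprom; };

  /* Read both sense banks on a 2940-UWPro: bank 0 carries the internal
   * 68-pin sense, bank 1 the external 68-pin and EEPROM sense.  The
   * lines are active low, so a clear bit means "present". */
  static struct uwpro_cables sense_uwpro(struct aic7xxx_host *p)
  {
    struct uwpro_cables c;
    unsigned char bank0, bank1;

    write_brdctl(p, 0);          /* select bank 0 */
    bank0 = read_brdctl(p);
    write_brdctl(p, BRDDAT5);    /* select bank 1 */
    bank1 = read_brdctl(p);

    c.int68  = !(bank0 & BRDDAT7);
    c.ext68  = !(bank1 & BRDDAT6);
    c.eeprom = !(bank1 & BRDDAT7);
    return c;
  }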
-   */ -  write_brdctl(p, 0); - -  /* -   * Now we read the state of the internal 68 connector.  BRDDAT6 -   * is don't care, BRDDAT7 is internal 68.  The cable is -   * present if the bit is 0 -   */ -  brdctl = read_brdctl(p); -  *int_68 = !(brdctl & BRDDAT7); - -  /* -   * Set the bank bit in brdctl and then read the external cable state -   * and the EEPROM status -   */ -  write_brdctl(p, BRDDAT5); -  brdctl = read_brdctl(p); - -  *ext_68 = !(brdctl & BRDDAT6); -  *eeprom = !(brdctl & BRDDAT7); - -  /* -   * We're done, the calling function will release the SEEPROM for us -   */ -} - -/*+F************************************************************************* - * Function: - *   aic787x_cable_detect - * - * Description: - *   Detect the cables that are present on aic787x class controller chips - * - * NOTE: This function assumes the SEEPROM will have already been acquired - *       prior to invocation of this function. - *-F*************************************************************************/ -static void -aic787x_cable_detect(struct aic7xxx_host *p, int *int_50, int *int_68, -    int *ext_present, int *eeprom) -{ -  unsigned char brdctl; - -  /* -   * First read the status of our cables.  Set the rom bank to -   * 0 since the bank setting serves as a multiplexor for the -   * cable detection logic.  BRDDAT5 controls the bank switch. -   */ -  write_brdctl(p, 0); - -  /* -   * Now we read the state of the two internal connectors.  BRDDAT6 -   * is internal 50, BRDDAT7 is internal 68.  For each, the cable is -   * present if the bit is 0 -   */ -  brdctl = read_brdctl(p); -  *int_50 = !(brdctl & BRDDAT6); -  *int_68 = !(brdctl & BRDDAT7); - -  /* -   * Set the bank bit in brdctl and then read the external cable state -   * and the EEPROM status -   */ -  write_brdctl(p, BRDDAT5); -  brdctl = read_brdctl(p); - -  *ext_present = !(brdctl & BRDDAT6); -  *eeprom = !(brdctl & BRDDAT7); - -  /* -   * We're done, the calling function will release the SEEPROM for us -   */ -} - -/*+F************************************************************************* - * Function: - *   aic787x_ultra2_term_detect - * - * Description: - *   Detect the termination settings present on ultra2 class controllers - * - * NOTE: This function assumes the SEEPROM will have already been acquired - *       prior to invocation of this function. - *-F*************************************************************************/ -static void -aic7xxx_ultra2_term_detect(struct aic7xxx_host *p, int *enableSE_low, -                           int *enableSE_high, int *enableLVD_low, -                           int *enableLVD_high, int *eprom_present) -{ -  unsigned char brdctl; - -  brdctl = read_brdctl(p); - -  *eprom_present  = (brdctl & BRDDAT7); -  *enableSE_high  = (brdctl & BRDDAT6); -  *enableSE_low   = (brdctl & BRDDAT5); -  *enableLVD_high = (brdctl & BRDDAT4); -  *enableLVD_low  = (brdctl & BRDDAT3); -} - -/*+F************************************************************************* - * Function: - *   configure_termination - * - * Description: - *   Configures the termination settings on PCI adapters that have - *   SEEPROMs available. 
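A note on the precedence configure_termination() applies for Ultra2 parts below: cable detection runs only when no command-line override was given, and afterwards any cleared auto-term bit substitutes the user's manual setting. The single-ended half of that rule as a sketch (apply_se_override is an invented name; the driver does this inline):

  /* Fold user overrides into the detected single-ended termination
   * state: when CFSEAUTOTERM is clear, CFSTERM/CFWSTERM take over. */
  static void apply_se_override(struct aic7xxx_host *p,
                                int *enableSE_low, int *enableSE_high)
  {
    if (!(p->adapter_control & CFSEAUTOTERM))
    {
      *enableSE_low  = (p->adapter_control & CFSTERM)  != 0;
      *enableSE_high = (p->adapter_control & CFWSTERM) != 0;
    }
  }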
- *-F*************************************************************************/
-static void
-configure_termination(struct aic7xxx_host *p)
-{
-  int internal50_present = 0;
-  int internal68_present = 0;
-  int external_present = 0;
-  int eprom_present = 0;
-  int enableSE_low = 0;
-  int enableSE_high = 0;
-  int enableLVD_low = 0;
-  int enableLVD_high = 0;
-  unsigned char brddat = 0;
-  unsigned char max_target = 0;
-  unsigned char sxfrctl1 = aic_inb(p, SXFRCTL1);
-
-  if (acquire_seeprom(p))
-  {
-    if (p->features & (AHC_WIDE|AHC_TWIN))
-      max_target = 16;
-    else
-      max_target = 8;
-    aic_outb(p, SEEMS | SEECS, SEECTL);
-    sxfrctl1 &= ~STPWEN;
-    /*
-     * The termination/cable detection logic is split into three distinct
-     * groups: Ultra2 and later controllers, 2940UW-Pro controllers, and
-     * the older 7850, 7860, 7870, 7880, and 7895 controllers.  Each has
-     * its own unique way of detecting its cables and writing the results
-     * back to the card.
-     */
-    if (p->features & AHC_ULTRA2)
-    {
-      /*
-       * As long as the user hasn't overridden the termination settings,
-       * always check the cable detection logic
-       */
-      if (aic7xxx_override_term == -1)
-      {
-        aic7xxx_ultra2_term_detect(p, &enableSE_low, &enableSE_high,
-                                   &enableLVD_low, &enableLVD_high,
-                                   &eprom_present);
-      }
-
-      /*
-       * If the user is overriding settings, then they have been preserved
-       * to here as fake adapter_control entries.  Parse them and allow
-       * them to override the detected settings (if we even did detection).
-       */
-      if (!(p->adapter_control & CFSEAUTOTERM))
-      {
-        enableSE_low = (p->adapter_control & CFSTERM);
-        enableSE_high = (p->adapter_control & CFWSTERM);
-      }
-      if (!(p->adapter_control & CFAUTOTERM))
-      {
-        enableLVD_low = enableLVD_high = (p->adapter_control & CFLVDSTERM);
-      }
-
-      /*
-       * Now take those settings that we have and translate them into the
-       * values that must be written into the registers. 
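That translation, enumerated in the comment just below, collapses to a few bit ORs. A sketch assuming the driver's BRDDAT bit names; note the LVD/primary low enable is the odd one out, living in SXFRCTL1's STPWEN bit rather than in the BRDDAT byte:

  /* Map the Ultra2 termination enables onto the BRDDAT byte.  LVD low
   * is handled separately via the STPWEN bit of SXFRCTL1. */
  static unsigned char ultra2_term_to_brddat(int enableSE_low,
                                             int enableSE_high,
                                             int enableLVD_high)
  {
    unsigned char brddat = 0;

    if (enableLVD_high) brddat |= BRDDAT4;  /* LVD/primary high */
    if (enableSE_low)   brddat |= BRDDAT5;  /* secondary low    */
    if (enableSE_high)  brddat |= BRDDAT6;  /* secondary high   */
    return brddat;
  }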
-       * -       * Flash Enable = BRDDAT7 -       * Secondary High Term Enable = BRDDAT6 -       * Secondary Low Term Enable = BRDDAT5 -       * LVD/Primary High Term Enable = BRDDAT4 -       * LVD/Primary Low Term Enable = STPWEN bit in SXFRCTL1 -       */ -      if (enableLVD_low != 0) -      { -        sxfrctl1 |= STPWEN; -        p->flags |= AHC_TERM_ENB_LVD; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk(KERN_INFO "(scsi%d) LVD/Primary Low byte termination " -                 "Enabled\n", p->host_no); -      } -           -      if (enableLVD_high != 0) -      { -        brddat |= BRDDAT4; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk(KERN_INFO "(scsi%d) LVD/Primary High byte termination " -                 "Enabled\n", p->host_no); -      } - -      if (enableSE_low != 0) -      { -        brddat |= BRDDAT5; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk(KERN_INFO "(scsi%d) Secondary Low byte termination " -                 "Enabled\n", p->host_no); -      } - -      if (enableSE_high != 0) -      { -        brddat |= BRDDAT6; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk(KERN_INFO "(scsi%d) Secondary High byte termination " -                 "Enabled\n", p->host_no); -      } -    } -    else if (p->features & AHC_NEW_AUTOTERM) -    { -      /* -       * The 50 pin connector termination is controlled by STPWEN in the -       * SXFRCTL1 register.  Since the Adaptec docs typically say the -       * controller is not allowed to be in the middle of a cable and -       * this is the only connection on that stub of the bus, there is -       * no need to even check for narrow termination, it's simply -       * always on. -       */ -      sxfrctl1 |= STPWEN; -      if (aic7xxx_verbose & VERBOSE_PROBE2) -        printk(KERN_INFO "(scsi%d) Narrow channel termination Enabled\n", -               p->host_no); - -      if (p->adapter_control & CFAUTOTERM) -      { -        aic2940_uwpro_wide_cable_detect(p, &internal68_present, -                                        &external_present, -                                        &eprom_present); -        printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, " -               "Ext-68 %s)\n", p->host_no, -               "Don't Care", -               internal68_present ? "YES" : "NO", -               external_present ? "YES" : "NO"); -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no, -               eprom_present ? 
"is" : "is not"); -        if (internal68_present && external_present) -        { -          brddat = 0; -          p->flags &= ~AHC_TERM_ENB_SE_HIGH; -          if (aic7xxx_verbose & VERBOSE_PROBE2) -            printk(KERN_INFO "(scsi%d) Wide channel termination Disabled\n", -                   p->host_no); -        } -        else -        { -          brddat = BRDDAT6; -          p->flags |= AHC_TERM_ENB_SE_HIGH; -          if (aic7xxx_verbose & VERBOSE_PROBE2) -            printk(KERN_INFO "(scsi%d) Wide channel termination Enabled\n", -                   p->host_no); -        } -      } -      else -      { -        /* -         * The termination of the Wide channel is done more like normal -         * though, and the setting of this termination is done by writing -         * either a 0 or 1 to BRDDAT6 of the BRDDAT register -         */ -        if (p->adapter_control & CFWSTERM) -        { -          brddat = BRDDAT6; -          p->flags |= AHC_TERM_ENB_SE_HIGH; -          if (aic7xxx_verbose & VERBOSE_PROBE2) -            printk(KERN_INFO "(scsi%d) Wide channel termination Enabled\n", -                   p->host_no); -        } -        else -        { -          brddat = 0; -        } -      } -    } -    else -    { -      if (p->adapter_control & CFAUTOTERM) -      { -        if (p->flags & AHC_MOTHERBOARD) -        { -          printk(KERN_INFO "(scsi%d) Warning - detected auto-termination\n", -                 p->host_no); -          printk(KERN_INFO "(scsi%d) Please verify driver detected settings " -            "are correct.\n", p->host_no); -          printk(KERN_INFO "(scsi%d) If not, then please properly set the " -            "device termination\n", p->host_no); -          printk(KERN_INFO "(scsi%d) in the Adaptec SCSI BIOS by hitting " -            "CTRL-A when prompted\n", p->host_no); -          printk(KERN_INFO "(scsi%d) during machine bootup.\n", p->host_no); -        } -        /* Configure auto termination. */ - -        if ( (p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870 ) -        { -          aic787x_cable_detect(p, &internal50_present, &internal68_present, -            &external_present, &eprom_present); -        } -        else -        { -          aic785x_cable_detect(p, &internal50_present, &external_present, -            &eprom_present); -        } - -        if (max_target <= 8) -          internal68_present = 0; - -        if (max_target > 8) -        { -          printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Int-68 %s, " -                 "Ext-68 %s)\n", p->host_no, -                 internal50_present ? "YES" : "NO", -                 internal68_present ? "YES" : "NO", -                 external_present ? "YES" : "NO"); -        } -        else -        { -          printk(KERN_INFO "(scsi%d) Cables present (Int-50 %s, Ext-50 %s)\n", -                 p->host_no, -                 internal50_present ? "YES" : "NO", -                 external_present ? "YES" : "NO"); -        } -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk(KERN_INFO "(scsi%d) EEPROM %s present.\n", p->host_no, -               eprom_present ? "is" : "is not"); - -        /* -         * Now set the termination based on what we found.  BRDDAT6 -         * controls wide termination enable. -         * Flash Enable = BRDDAT7 -         * SE High Term Enable = BRDDAT6 -         */ -        if (internal50_present && internal68_present && external_present) -        { -          printk(KERN_INFO "(scsi%d) Illegal cable configuration!!  
Only two\n",
-                 p->host_no);
-          printk(KERN_INFO "(scsi%d) connectors on the SCSI controller may be "
-                 "in use at a time!\n", p->host_no);
-          /*
-           * Force termination (low and high byte) on.  This is safer than
-           * leaving it completely off, especially since this message comes
-           * most often from motherboard controllers that don't even have 3
-           * connectors, but instead are failing the cable detection.
-           */
-          internal50_present = external_present = 0;
-          enableSE_high = enableSE_low = 1;
-        }
-
-        if ((max_target > 8) &&
-            ((external_present == 0) || (internal68_present == 0)) )
-        {
-          brddat |= BRDDAT6;
-          p->flags |= AHC_TERM_ENB_SE_HIGH;
-          if (aic7xxx_verbose & VERBOSE_PROBE2)
-            printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
-                   p->host_no);
-        }
-
-        if ( ((internal50_present ? 1 : 0) +
-              (internal68_present ? 1 : 0) +
-              (external_present   ? 1 : 0)) <= 1 )
-        {
-          sxfrctl1 |= STPWEN;
-          p->flags |= AHC_TERM_ENB_SE_LOW;
-          if (aic7xxx_verbose & VERBOSE_PROBE2)
-            printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
-                   p->host_no);
-        }
-      }
-      else /* !(p->adapter_control & CFAUTOTERM) */
-      {
-        if (p->adapter_control & CFSTERM)
-        {
-          sxfrctl1 |= STPWEN;
-          if (aic7xxx_verbose & VERBOSE_PROBE2)
-            printk(KERN_INFO "(scsi%d) SE Low byte termination Enabled\n",
-                   p->host_no);
-        }
-
-        if (p->adapter_control & CFWSTERM)
-        {
-          brddat |= BRDDAT6;
-          if (aic7xxx_verbose & VERBOSE_PROBE2)
-            printk(KERN_INFO "(scsi%d) SE High byte termination Enabled\n",
-                   p->host_no);
-        }
-      }
-    }
-
-    aic_outb(p, sxfrctl1, SXFRCTL1);
-    write_brdctl(p, brddat);
-    release_seeprom(p);
-  }
-}
-
-/*+F*************************************************************************
- * Function:
- *   detect_maxscb
- *
- * Description:
- *   Detects the maximum number of SCBs for the controller and returns
- *   the count and a mask in p (p->maxscbs, p->qcntmask).
- *-F*************************************************************************/
-static void
-detect_maxscb(struct aic7xxx_host *p)
-{
-  int i;
-
-  /*
-   * It's possible that we've already done this for multichannel
-   * adapters.
-   */
-  if (p->scb_data->maxhscbs == 0)
-  {
-    /*
-     * We haven't initialized the SCB settings yet.  Walk the SCBs to
-     * determine how many there are.
-     */
-    aic_outb(p, 0, FREE_SCBH);
-
-    for (i = 0; i < AIC7XXX_MAXSCB; i++)
-    {
-      aic_outb(p, i, SCBPTR);
-      aic_outb(p, i, SCB_CONTROL);
-      if (aic_inb(p, SCB_CONTROL) != i)
-        break;
-      aic_outb(p, 0, SCBPTR);
-      if (aic_inb(p, SCB_CONTROL) != 0)
-        break;
-
-      aic_outb(p, i, SCBPTR);
-      aic_outb(p, 0, SCB_CONTROL);   /* Clear the control byte. */
-      aic_outb(p, i + 1, SCB_NEXT);  /* Set the next pointer. */
-      aic_outb(p, SCB_LIST_NULL, SCB_TAG);  /* Make the tag invalid. 
*/ -      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS);  /* no busy untagged */ -      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+1);/* targets active yet */ -      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+2); -      aic_outb(p, SCB_LIST_NULL, SCB_BUSYTARGETS+3); -    } - -    /* Make sure the last SCB terminates the free list. */ -    aic_outb(p, i - 1, SCBPTR); -    aic_outb(p, SCB_LIST_NULL, SCB_NEXT); - -    /* Ensure we clear the first (0) SCBs control byte. */ -    aic_outb(p, 0, SCBPTR); -    aic_outb(p, 0, SCB_CONTROL); - -    p->scb_data->maxhscbs = i; -    /* -     * Use direct indexing instead for speed -     */ -    if ( i == AIC7XXX_MAXSCB ) -      p->flags &= ~AHC_PAGESCBS; -  } - -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_register - * - * Description: - *   Register a Adaptec aic7xxx chip SCSI controller with the kernel. - *-F*************************************************************************/ -static int -aic7xxx_register(struct scsi_host_template *template, struct aic7xxx_host *p, -  int reset_delay) -{ -  int i, result; -  int max_targets; -  int found = 1; -  unsigned char term, scsi_conf; -  struct Scsi_Host *host; - -  host = p->host; - -  p->scb_data->maxscbs = AIC7XXX_MAXSCB; -  host->can_queue = AIC7XXX_MAXSCB; -  host->cmd_per_lun = 3; -  host->sg_tablesize = AIC7XXX_MAX_SG; -  host->this_id = p->scsi_id; -  host->io_port = p->base; -  host->n_io_port = 0xFF; -  host->base = p->mbase; -  host->irq = p->irq; -  if (p->features & AHC_WIDE) -  { -    host->max_id = 16; -  } -  if (p->features & AHC_TWIN) -  { -    host->max_channel = 1; -  } - -  p->host = host; -  p->host_no = host->host_no; -  host->unique_id = p->instance; -  p->isr_count = 0; -  p->next = NULL; -  p->completeq.head = NULL; -  p->completeq.tail = NULL; -  scbq_init(&p->scb_data->free_scbs); -  scbq_init(&p->waiting_scbs); -  INIT_LIST_HEAD(&p->aic_devs); - -  /* -   * We currently have no commands of any type -   */ -  p->qinfifonext = 0; -  p->qoutfifonext = 0; - -  printk(KERN_INFO "(scsi%d) <%s> found at ", p->host_no, -    board_names[p->board_name_index]); -  switch(p->chip) -  { -    case (AHC_AIC7770|AHC_EISA): -      printk("EISA slot %d\n", p->pci_device_fn); -      break; -    case (AHC_AIC7770|AHC_VL): -      printk("VLB slot %d\n", p->pci_device_fn); -      break; -    default: -      printk("PCI %d/%d/%d\n", p->pci_bus, PCI_SLOT(p->pci_device_fn), -        PCI_FUNC(p->pci_device_fn)); -      break; -  } -  if (p->features & AHC_TWIN) -  { -    printk(KERN_INFO "(scsi%d) Twin Channel, A SCSI ID %d, B SCSI ID %d, ", -           p->host_no, p->scsi_id, p->scsi_id_b); -  } -  else -  { -    char *channel; - -    channel = ""; - -    if ((p->flags & AHC_MULTI_CHANNEL) != 0) -    { -      channel = " A"; - -      if ( (p->flags & (AHC_CHNLB|AHC_CHNLC)) != 0 ) -      { -        channel = (p->flags & AHC_CHNLB) ? " B" : " C"; -      } -    } -    if (p->features & AHC_WIDE) -    { -      printk(KERN_INFO "(scsi%d) Wide ", p->host_no); -    } -    else -    { -      printk(KERN_INFO "(scsi%d) Narrow ", p->host_no); -    } -    printk("Channel%s, SCSI ID=%d, ", channel, p->scsi_id); -  } -  aic_outb(p, 0, SEQ_FLAGS); - -  detect_maxscb(p); - -  printk("%d/%d SCBs\n", p->scb_data->maxhscbs, p->scb_data->maxscbs); -  if (aic7xxx_verbose & VERBOSE_PROBE2) -  { -    printk(KERN_INFO "(scsi%d) BIOS %sabled, IO Port 0x%lx, IRQ %d\n", -      p->host_no, (p->flags & AHC_BIOS_ENABLED) ? 
"en" : "dis", -      p->base, p->irq); -    printk(KERN_INFO "(scsi%d) IO Memory at 0x%lx, MMAP Memory at %p\n", -      p->host_no, p->mbase, p->maddr); -  } - -#ifdef CONFIG_PCI -  /* -   * Now that we know our instance number, we can set the flags we need to -   * force termination if need be. -   */ -  if (aic7xxx_stpwlev != -1) -  { -    /* -     * This option only applies to PCI controllers. -     */ -    if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) -    { -      unsigned char devconfig; - -      pci_read_config_byte(p->pdev, DEVCONFIG, &devconfig); -      if ( (aic7xxx_stpwlev >> p->instance) & 0x01 ) -      { -        devconfig |= STPWLEVEL; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk("(scsi%d) Force setting STPWLEVEL bit\n", p->host_no); -      } -      else -      { -        devconfig &= ~STPWLEVEL; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk("(scsi%d) Force clearing STPWLEVEL bit\n", p->host_no); -      } -      pci_write_config_byte(p->pdev, DEVCONFIG, devconfig); -    } -  } -#endif - -  /* -   * That took care of devconfig and stpwlev, now for the actual termination -   * settings. -   */ -  if (aic7xxx_override_term != -1) -  { -    /* -     * Again, this only applies to PCI controllers.  We don't have problems -     * with the termination on 274x controllers to the best of my knowledge. -     */ -    if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) -    { -      unsigned char term_override; - -      term_override = ( (aic7xxx_override_term >> (p->instance * 4)) & 0x0f); -      p->adapter_control &=  -        ~(CFSTERM|CFWSTERM|CFLVDSTERM|CFAUTOTERM|CFSEAUTOTERM); -      if ( (p->features & AHC_ULTRA2) && (term_override & 0x0c) ) -      { -        p->adapter_control |= CFLVDSTERM; -      } -      if (term_override & 0x02) -      { -        p->adapter_control |= CFWSTERM; -      } -      if (term_override & 0x01) -      { -        p->adapter_control |= CFSTERM; -      } -    } -  } - -  if ( (p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1) ) -  { -    if (p->features & AHC_SPIOCAP) -    { -      if ( aic_inb(p, SPIOCAP) & SSPIOCPS ) -      /* -       * Update the settings in sxfrctl1 to match the termination -       * settings. -       */ -        configure_termination(p); -    } -    else if ((p->chip & AHC_CHIPID_MASK) >= AHC_AIC7870) -    { -      configure_termination(p); -    } -  } - -  /* -   * Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels -   */ -  if (p->features & AHC_TWIN) -  { -    /* Select channel B */ -    aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL); - -    if ((p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1)) -      term = (aic_inb(p, SXFRCTL1) & STPWEN); -    else -      term = ((p->flags & AHC_TERM_ENB_B) ? 
STPWEN : 0); - -    aic_outb(p, p->scsi_id_b, SCSIID); -    scsi_conf = aic_inb(p, SCSICONF + 1); -    aic_outb(p, DFON | SPIOEN, SXFRCTL0); -    aic_outb(p, (scsi_conf & ENSPCHK) | aic7xxx_seltime | term |  -         ENSTIMER | ACTNEGEN, SXFRCTL1); -    aic_outb(p, 0, SIMODE0); -    aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1); -    aic_outb(p, 0, SCSIRATE); - -    /* Select channel A */ -    aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL); -  } - -  if (p->features & AHC_ULTRA2) -  { -    aic_outb(p, p->scsi_id, SCSIID_ULTRA2); -  } -  else -  { -    aic_outb(p, p->scsi_id, SCSIID); -  } -  if ((p->flags & AHC_SEEPROM_FOUND) || (aic7xxx_override_term != -1)) -    term = (aic_inb(p, SXFRCTL1) & STPWEN); -  else -    term = ((p->flags & (AHC_TERM_ENB_A|AHC_TERM_ENB_LVD)) ? STPWEN : 0); -  scsi_conf = aic_inb(p, SCSICONF); -  aic_outb(p, DFON | SPIOEN, SXFRCTL0); -  aic_outb(p, (scsi_conf & ENSPCHK) | aic7xxx_seltime | term |  -       ENSTIMER | ACTNEGEN, SXFRCTL1); -  aic_outb(p, 0, SIMODE0); -  /* -   * If we are a cardbus adapter then don't enable SCSI reset detection. -   * We shouldn't likely be sharing SCSI busses with someone else, and -   * if we don't have a cable currently plugged into the controller then -   * we won't have a power source for the SCSI termination, which means -   * we'll see infinite incoming bus resets. -   */ -  if(p->flags & AHC_NO_STPWEN) -    aic_outb(p, ENSELTIMO | ENSCSIPERR, SIMODE1); -  else -    aic_outb(p, ENSELTIMO | ENSCSIRST | ENSCSIPERR, SIMODE1); -  aic_outb(p, 0, SCSIRATE); -  if ( p->features & AHC_ULTRA2) -    aic_outb(p, 0, SCSIOFFSET); - -  /* -   * Look at the information that board initialization or the board -   * BIOS has left us. In the lower four bits of each target's -   * scratch space any value other than 0 indicates that we should -   * initiate synchronous transfers. If it's zero, the user or the -   * BIOS has decided to disable synchronous negotiation to that -   * target so we don't activate the needsdtr flag. -   */ -  if ((p->features & (AHC_TWIN|AHC_WIDE)) == 0) -  { -    max_targets = 8; -  } -  else -  { -    max_targets = 16; -  } - -  if (!(aic7xxx_no_reset)) -  { -    /* -     * If we reset the bus, then clear the transfer settings, else leave -     * them be. -     */ -    aic_outb(p, 0, ULTRA_ENB); -    aic_outb(p, 0, ULTRA_ENB + 1); -    p->ultraenb = 0; -  } - -  /* -   * Allocate enough hardware scbs to handle the maximum number of -   * concurrent transactions we can have.  We have to make sure that -   * the allocated memory is contiguous memory.  The Linux kmalloc -   * routine should only allocate contiguous memory, but note that -   * this could be a problem if kmalloc() is changed. -   */ -  { -    size_t array_size; -    unsigned int hscb_physaddr; - -    array_size = p->scb_data->maxscbs * sizeof(struct aic7xxx_hwscb); -    if (p->scb_data->hscbs == NULL) -    { -      /* pci_alloc_consistent enforces the alignment already and -       * clears the area as well. 
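Immediately after this allocation the driver hands the 32-bit bus address to the sequencer through a byte-wide register window, least significant byte first. The four explicit writes below generalize to a small loop (write_dma_addr32 is an invented helper; the driver open-codes it):

  /* Write a 32-bit DMA address to a byte-wide register window,
   * least significant byte first, as done for HSCB_ADDR below. */
  static void write_dma_addr32(struct aic7xxx_host *p,
                               unsigned int addr, int reg)
  {
    int byte;

    for (byte = 0; byte < 4; byte++)
      aic_outb(p, (addr >> (8 * byte)) & 0xFF, reg + byte);
  }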
-       */ -      p->scb_data->hscbs = pci_alloc_consistent(p->pdev, array_size, -						&p->scb_data->hscbs_dma); -      /* We have to use pci_free_consistent, not kfree */ -      p->scb_data->hscb_kmalloc_ptr = NULL; -      p->scb_data->hscbs_dma_len = array_size; -    } -    if (p->scb_data->hscbs == NULL) -    { -      printk("(scsi%d) Unable to allocate hardware SCB array; " -             "failing detection.\n", p->host_no); -      aic_outb(p, 0, SIMODE1); -      p->irq = 0; -      return(0); -    } - -    hscb_physaddr = p->scb_data->hscbs_dma; -    aic_outb(p, hscb_physaddr & 0xFF, HSCB_ADDR); -    aic_outb(p, (hscb_physaddr >> 8) & 0xFF, HSCB_ADDR + 1); -    aic_outb(p, (hscb_physaddr >> 16) & 0xFF, HSCB_ADDR + 2); -    aic_outb(p, (hscb_physaddr >> 24) & 0xFF, HSCB_ADDR + 3); - -    /* Set up the fifo areas at the same time */ -    p->untagged_scbs = pci_alloc_consistent(p->pdev, 3*256, &p->fifo_dma); -    if (p->untagged_scbs == NULL) -    { -      printk("(scsi%d) Unable to allocate hardware FIFO arrays; " -             "failing detection.\n", p->host_no); -      p->irq = 0; -      return(0); -    } - -    p->qoutfifo = p->untagged_scbs + 256; -    p->qinfifo = p->qoutfifo + 256; -    for (i = 0; i < 256; i++) -    { -      p->untagged_scbs[i] = SCB_LIST_NULL; -      p->qinfifo[i] = SCB_LIST_NULL; -      p->qoutfifo[i] = SCB_LIST_NULL; -    } - -    hscb_physaddr = p->fifo_dma; -    aic_outb(p, hscb_physaddr & 0xFF, SCBID_ADDR); -    aic_outb(p, (hscb_physaddr >> 8) & 0xFF, SCBID_ADDR + 1); -    aic_outb(p, (hscb_physaddr >> 16) & 0xFF, SCBID_ADDR + 2); -    aic_outb(p, (hscb_physaddr >> 24) & 0xFF, SCBID_ADDR + 3); -  } - -  /* The Q-FIFOs we just set up are all empty */ -  aic_outb(p, 0, QINPOS); -  aic_outb(p, 0, KERNEL_QINPOS); -  aic_outb(p, 0, QOUTPOS); - -  if(p->features & AHC_QUEUE_REGS) -  { -    aic_outb(p, SCB_QSIZE_256, QOFF_CTLSTA); -    aic_outb(p, 0, SDSCB_QOFF); -    aic_outb(p, 0, SNSCB_QOFF); -    aic_outb(p, 0, HNSCB_QOFF); -  } - -  /* -   * We don't have any waiting selections or disconnected SCBs. -   */ -  aic_outb(p, SCB_LIST_NULL, WAITING_SCBH); -  aic_outb(p, SCB_LIST_NULL, DISCONNECTED_SCBH); - -  /* -   * Message out buffer starts empty -   */ -  aic_outb(p, MSG_NOOP, MSG_OUT); -  aic_outb(p, MSG_NOOP, LAST_MSG); - -  /* -   * Set all the other asundry items that haven't been set yet. -   * This includes just dumping init values to a lot of registers simply -   * to make sure they've been touched and are ready for use parity wise -   * speaking. -   */ -  aic_outb(p, 0, TMODE_CMDADDR); -  aic_outb(p, 0, TMODE_CMDADDR + 1); -  aic_outb(p, 0, TMODE_CMDADDR + 2); -  aic_outb(p, 0, TMODE_CMDADDR + 3); -  aic_outb(p, 0, TMODE_CMDADDR_NEXT); - -  /* -   * Link us into the list of valid hosts -   */ -  p->next = first_aic7xxx; -  first_aic7xxx = p; - -  /* -   * Allocate the first set of scbs for this controller.  This is to stream- -   * line code elsewhere in the driver.  If we have to check for the existence -   * of scbs in certain code sections, it slows things down.  However, as -   * soon as we register the IRQ for this card, we could get an interrupt that -   * includes possibly the SCSI_RSTI interrupt.  
If we catch that interrupt -   * then we are likely to segfault if we don't have at least one chunk of -   * SCBs allocated or add checks all through the reset code to make sure -   * that the SCBs have been allocated which is an invalid running condition -   * and therefore I think it's preferable to simply pre-allocate the first -   * chunk of SCBs. -   */ -  aic7xxx_allocate_scb(p); - -  /* -   * Load the sequencer program, then re-enable the board - -   * resetting the AIC-7770 disables it, leaving the lights -   * on with nobody home. -   */ -  aic7xxx_loadseq(p); - -  /* -   * Make sure the AUTOFLUSHDIS bit is *not* set in the SBLKCTL register -   */ -  aic_outb(p, aic_inb(p, SBLKCTL) & ~AUTOFLUSHDIS, SBLKCTL); - -  if ( (p->chip & AHC_CHIPID_MASK) == AHC_AIC7770 ) -  { -    aic_outb(p, ENABLE, BCTL);  /* Enable the boards BUS drivers. */ -  } - -  if ( !(aic7xxx_no_reset) ) -  { -    if (p->features & AHC_TWIN) -    { -      if (aic7xxx_verbose & VERBOSE_PROBE2) -        printk(KERN_INFO "(scsi%d) Resetting channel B\n", p->host_no); -      aic_outb(p, aic_inb(p, SBLKCTL) | SELBUSB, SBLKCTL); -      aic7xxx_reset_current_bus(p); -      aic_outb(p, aic_inb(p, SBLKCTL) & ~SELBUSB, SBLKCTL); -    } -    /* Reset SCSI bus A. */ -    if (aic7xxx_verbose & VERBOSE_PROBE2) -    {  /* In case we are a 3940, 3985, or 7895, print the right channel */ -      char *channel = ""; -      if (p->flags & AHC_MULTI_CHANNEL) -      { -        channel = " A"; -        if (p->flags & (AHC_CHNLB|AHC_CHNLC)) -          channel = (p->flags & AHC_CHNLB) ? " B" : " C"; -      } -      printk(KERN_INFO "(scsi%d) Resetting channel%s\n", p->host_no, channel); -    } -     -    aic7xxx_reset_current_bus(p); - -  } -  else -  { -    if (!reset_delay) -    { -      printk(KERN_INFO "(scsi%d) Not resetting SCSI bus.  Note: Don't use " -             "the no_reset\n", p->host_no); -      printk(KERN_INFO "(scsi%d) option unless you have a verifiable need " -             "for it.\n", p->host_no); -    } -  } -   -  /* -   * Register IRQ with the kernel.  Only allow sharing IRQs with -   * PCI devices. -   */ -  if (!(p->chip & AHC_PCI)) -  { -    result = (request_irq(p->irq, do_aic7xxx_isr, 0, "aic7xxx", p)); -  } -  else -  { -    result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_SHARED, -              "aic7xxx", p)); -    if (result < 0) -    { -      result = (request_irq(p->irq, do_aic7xxx_isr, IRQF_DISABLED | IRQF_SHARED, -              "aic7xxx", p)); -    } -  } -  if (result < 0) -  { -    printk(KERN_WARNING "(scsi%d) Couldn't register IRQ %d, ignoring " -           "controller.\n", p->host_no, p->irq); -    aic_outb(p, 0, SIMODE1); -    p->irq = 0; -    return (0); -  } - -  if(aic_inb(p, INTSTAT) & INT_PEND) -    printk(INFO_LEAD "spurious interrupt during configuration, cleared.\n", -      p->host_no, -1, -1 , -1); -  aic7xxx_clear_intstat(p); - -  unpause_sequencer(p, /* unpause_always */ TRUE); - -  return (found); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_chip_reset - * - * Description: - *   Perform a chip reset on the aic7xxx SCSI controller.  The controller - *   is paused upon return. - *-F*************************************************************************/ -static int -aic7xxx_chip_reset(struct aic7xxx_host *p) -{ -  unsigned char sblkctl; -  int wait; - -  /* -   * For some 274x boards, we must clear the CHIPRST bit and pause -   * the sequencer. For some reason, this makes the driver work. 
-   */ -  aic_outb(p, PAUSE | CHIPRST, HCNTRL); - -  /* -   * In the future, we may call this function as a last resort for -   * error handling.  Let's be nice and not do any unnecessary delays. -   */ -  wait = 1000;  /* 1 msec (1000 * 1 msec) */ -  while (--wait && !(aic_inb(p, HCNTRL) & CHIPRSTACK)) -  { -    udelay(1);  /* 1 usec */ -  } - -  pause_sequencer(p); - -  sblkctl = aic_inb(p, SBLKCTL) & (SELBUSB|SELWIDE); -  if (p->chip & AHC_PCI) -    sblkctl &= ~SELBUSB; -  switch( sblkctl ) -  { -    case 0:  /* normal narrow card */ -      break; -    case 2:  /* Wide card */ -      p->features |= AHC_WIDE; -      break; -    case 8:  /* Twin card */ -      p->features |= AHC_TWIN; -      p->flags |= AHC_MULTI_CHANNEL; -      break; -    default: /* hmmm...we don't know what this is */ -      printk(KERN_WARNING "aic7xxx: Unsupported adapter type %d, ignoring.\n", -        aic_inb(p, SBLKCTL) & 0x0a); -      return(-1); -  } -  return(0); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_alloc - * - * Description: - *   Allocate and initialize a host structure.  Returns NULL upon error - *   and a pointer to a aic7xxx_host struct upon success. - *-F*************************************************************************/ -static struct aic7xxx_host * -aic7xxx_alloc(struct scsi_host_template *sht, struct aic7xxx_host *temp) -{ -  struct aic7xxx_host *p = NULL; -  struct Scsi_Host *host; - -  /* -   * Allocate a storage area by registering us with the mid-level -   * SCSI layer. -   */ -  host = scsi_register(sht, sizeof(struct aic7xxx_host)); - -  if (host != NULL) -  { -    p = (struct aic7xxx_host *) host->hostdata; -    memset(p, 0, sizeof(struct aic7xxx_host)); -    *p = *temp; -    p->host = host; - -    p->scb_data = kzalloc(sizeof(scb_data_type), GFP_ATOMIC); -    if (p->scb_data) -    { -      scbq_init (&p->scb_data->free_scbs); -    } -    else -    { -      /* -       * For some reason we don't have enough memory.  Free the -       * allocated memory for the aic7xxx_host struct, and return NULL. -       */ -      release_region(p->base, MAXREG - MINREG); -      scsi_unregister(host); -      return(NULL); -    } -    p->host_no = host->host_no; -  } -  return (p); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_free - * - * Description: - *   Frees and releases all resources associated with an instance of - *   the driver (struct aic7xxx_host *). - *-F*************************************************************************/ -static void -aic7xxx_free(struct aic7xxx_host *p) -{ -  int i; - -  /* -   * Free the allocated hardware SCB space. -   */ -  if (p->scb_data != NULL) -  { -    struct aic7xxx_scb_dma *scb_dma = NULL; -    if (p->scb_data->hscbs != NULL) -    { -      pci_free_consistent(p->pdev, p->scb_data->hscbs_dma_len, -			  p->scb_data->hscbs, p->scb_data->hscbs_dma); -      p->scb_data->hscbs = p->scb_data->hscb_kmalloc_ptr = NULL; -    } -    /* -     * Free the driver SCBs.  These were allocated on an as-need -     * basis.  We allocated these in groups depending on how many -     * we could fit into a given amount of RAM.  The tail SCB for -     * these allocations has a pointer to the alloced area. 
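The cleanup loop that follows relies on that tail pointer: it frees each underlying DMA block exactly once by remembering the last descriptor seen and releasing only when it changes. The idea in isolation, as a generic sketch with invented names (group members are assumed to share a descriptor pointer and sit adjacently, as the SCB arrays do):

  /* Free a set of objects that share group descriptors: a block is
   * released only when its descriptor differs from the previous one,
   * so each group is freed exactly once. */
  static void free_grouped(void **blocks, void (*free_fn)(void *), int n)
  {
    void *last = NULL;
    int i;

    for (i = 0; i < n; i++)
    {
      if (blocks[i] != last)
      {
        last = blocks[i];
        free_fn(last);
      }
    }
  }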
-     */ -    for (i = 0; i < p->scb_data->numscbs; i++) -    { -      if (p->scb_data->scb_array[i]->scb_dma != scb_dma) -      { -	scb_dma = p->scb_data->scb_array[i]->scb_dma; -	pci_free_consistent(p->pdev, scb_dma->dma_len, -			    (void *)((unsigned long)scb_dma->dma_address -                                     - scb_dma->dma_offset), -			    scb_dma->dma_address); -      } -      kfree(p->scb_data->scb_array[i]->kmalloc_ptr); -      p->scb_data->scb_array[i] = NULL; -    } -   -    /* -     * Free the SCB data area. -     */ -    kfree(p->scb_data); -  } - -  pci_free_consistent(p->pdev, 3*256, (void *)p->untagged_scbs, p->fifo_dma); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_load_seeprom - * - * Description: - *   Load the seeprom and configure adapter and target settings. - *   Returns 1 if the load was successful and 0 otherwise. - *-F*************************************************************************/ -static void -aic7xxx_load_seeprom(struct aic7xxx_host *p, unsigned char *sxfrctl1) -{ -  int have_seeprom = 0; -  int i, max_targets, mask; -  unsigned char scsirate, scsi_conf; -  unsigned short scarray[128]; -  struct seeprom_config *sc = (struct seeprom_config *) scarray; - -  if (aic7xxx_verbose & VERBOSE_PROBE2) -  { -    printk(KERN_INFO "aic7xxx: Loading serial EEPROM..."); -  } -  switch (p->chip) -  { -    case (AHC_AIC7770|AHC_EISA):  /* None of these adapters have seeproms. */ -      if (aic_inb(p, SCSICONF) & TERM_ENB) -        p->flags |= AHC_TERM_ENB_A; -      if ( (p->features & AHC_TWIN) && (aic_inb(p, SCSICONF + 1) & TERM_ENB) ) -        p->flags |= AHC_TERM_ENB_B; -      break; - -    case (AHC_AIC7770|AHC_VL): -      have_seeprom = read_284x_seeprom(p, (struct seeprom_config *) scarray); -      break; - -    default: -      have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)), -                                  scarray, p->sc_size, p->sc_type); -      if (!have_seeprom) -      { -        if(p->sc_type == C46) -          have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)), -                                      scarray, p->sc_size, C56_66); -        else -          have_seeprom = read_seeprom(p, (p->flags & (AHC_CHNLB|AHC_CHNLC)), -                                      scarray, p->sc_size, C46); -      } -      if (!have_seeprom) -      { -        p->sc_size = 128; -        have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)), -                                    scarray, p->sc_size, p->sc_type); -        if (!have_seeprom) -        { -          if(p->sc_type == C46) -            have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)), -                                        scarray, p->sc_size, C56_66); -          else -            have_seeprom = read_seeprom(p, 4*(p->flags & (AHC_CHNLB|AHC_CHNLC)), -                                        scarray, p->sc_size, C46); -        } -      } -      break; -  } - -  if (!have_seeprom) -  { -    if (aic7xxx_verbose & VERBOSE_PROBE2) -    { -      printk("\naic7xxx: No SEEPROM available.\n"); -    } -    p->flags |= AHC_NEWEEPROM_FMT; -    if (aic_inb(p, SCSISEQ) == 0) -    { -      p->flags |= AHC_USEDEFAULTS; -      p->flags &= ~AHC_BIOS_ENABLED; -      p->scsi_id = p->scsi_id_b = 7; -      *sxfrctl1 |= STPWEN; -      if (aic7xxx_verbose & VERBOSE_PROBE2) -      { -        printk("aic7xxx: Using default values.\n"); -      } -    } -    else if (aic7xxx_verbose & VERBOSE_PROBE2) -    { -     
 printk("aic7xxx: Using leftover BIOS values.\n"); -    } -    if ( ((p->chip & ~AHC_CHIPID_MASK) == AHC_PCI) && (*sxfrctl1 & STPWEN) ) -    { -      p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH; -      sc->adapter_control &= ~CFAUTOTERM; -      sc->adapter_control |= CFSTERM | CFWSTERM | CFLVDSTERM; -    } -    if (aic7xxx_extended) -      p->flags |= (AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B); -    else -      p->flags &= ~(AHC_EXTEND_TRANS_A | AHC_EXTEND_TRANS_B); -  } -  else -  { -    if (aic7xxx_verbose & VERBOSE_PROBE2) -    { -      printk("done\n"); -    } - -    /* -     * Note things in our flags -     */ -    p->flags |= AHC_SEEPROM_FOUND; - -    /* -     * Update the settings in sxfrctl1 to match the termination settings. -     */ -    *sxfrctl1 = 0; - -    /* -     * Get our SCSI ID from the SEEPROM setting... -     */ -    p->scsi_id = (sc->brtime_id & CFSCSIID); - -    /* -     * First process the settings that are different between the VLB -     * and PCI adapter seeproms. -     */ -    if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7770) -    { -      /* VLB adapter seeproms */ -      if (sc->bios_control & CF284XEXTEND) -        p->flags |= AHC_EXTEND_TRANS_A; - -      if (sc->adapter_control & CF284XSTERM) -      { -        *sxfrctl1 |= STPWEN; -        p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH; -      } -    } -    else -    { -      /* PCI adapter seeproms */ -      if (sc->bios_control & CFEXTEND) -        p->flags |= AHC_EXTEND_TRANS_A; -      if (sc->bios_control & CFBIOSEN) -        p->flags |= AHC_BIOS_ENABLED; -      else -        p->flags &= ~AHC_BIOS_ENABLED; - -      if (sc->adapter_control & CFSTERM) -      { -        *sxfrctl1 |= STPWEN; -        p->flags |= AHC_TERM_ENB_SE_LOW | AHC_TERM_ENB_SE_HIGH; -      } -    } -    memcpy(&p->sc, sc, sizeof(struct seeprom_config)); -  } - -  p->discenable = 0; - -  /* -   * Limit to 16 targets just in case.  The 2842 for one is known to -   * blow the max_targets setting, future cards might also. -   */ -  max_targets = ((p->features & (AHC_TWIN | AHC_WIDE)) ? 16 : 8); - -  if (have_seeprom) -  { -    for (i = 0; i < max_targets; i++) -    { -      if( ((p->features & AHC_ULTRA) && -          !(sc->adapter_control & CFULTRAEN) && -           (sc->device_flags[i] & CFSYNCHISULTRA)) || -          (sc->device_flags[i] & CFNEWULTRAFORMAT) ) -      { -        p->flags |= AHC_NEWEEPROM_FMT; -        break; -      } -    } -  } - -  for (i = 0; i < max_targets; i++) -  { -    mask = (0x01 << i); -    if (!have_seeprom) -    { -      if (aic_inb(p, SCSISEQ) != 0) -      { -        /* -         * OK...the BIOS set things up and left behind the settings we need. -         * Just make our sc->device_flags[i] entry match what the card has -         * set for this device. -         */ -	p->discenable = -	  ~(aic_inb(p, DISC_DSB) | (aic_inb(p, DISC_DSB + 1) << 8) ); -        p->ultraenb = -          (aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8) ); -	sc->device_flags[i] = (p->discenable & mask) ? 
CFDISC : 0; -        if (aic_inb(p, TARG_SCSIRATE + i) & WIDEXFER) -          sc->device_flags[i] |= CFWIDEB; -        if (p->features & AHC_ULTRA2) -        { -          if (aic_inb(p, TARG_OFFSET + i)) -          { -            sc->device_flags[i] |= CFSYNCH; -            sc->device_flags[i] |= (aic_inb(p, TARG_SCSIRATE + i) & 0x07); -            if ( (aic_inb(p, TARG_SCSIRATE + i) & 0x18) == 0x18 ) -              sc->device_flags[i] |= CFSYNCHISULTRA; -          } -        } -        else -        { -          if (aic_inb(p, TARG_SCSIRATE + i) & ~WIDEXFER) -          { -            sc->device_flags[i] |= CFSYNCH; -            if (p->features & AHC_ULTRA) -              sc->device_flags[i] |= ((p->ultraenb & mask) ? -                                      CFSYNCHISULTRA : 0); -          } -        } -      } -      else -      { -        /* -         * Assume the BIOS has NOT been run on this card and nothing between -         * the card and the devices is configured yet. -         */ -        sc->device_flags[i] = CFDISC; -        if (p->features & AHC_WIDE) -          sc->device_flags[i] |= CFWIDEB; -        if (p->features & AHC_ULTRA3) -          sc->device_flags[i] |= 2; -        else if (p->features & AHC_ULTRA2) -          sc->device_flags[i] |= 3; -        else if (p->features & AHC_ULTRA) -          sc->device_flags[i] |= CFSYNCHISULTRA; -        sc->device_flags[i] |= CFSYNCH; -        aic_outb(p, 0, TARG_SCSIRATE + i); -        if (p->features & AHC_ULTRA2) -          aic_outb(p, 0, TARG_OFFSET + i); -      } -    } -    if (sc->device_flags[i] & CFDISC) -    { -      p->discenable |= mask; -    } -    if (p->flags & AHC_NEWEEPROM_FMT) -    { -      if ( !(p->features & AHC_ULTRA2) ) -      { -        /* -         * I know of two different Ultra BIOSes that do this differently. -         * One on the Gigabyte 6BXU mb that wants flags[i] & CFXFER to -         * be == to 0x03 and SYNCHISULTRA to be true to mean 40MByte/s -         * while on the IBM Netfinity 5000 they want the same thing -         * to be something else, while flags[i] & CFXFER == 0x03 and -         * SYNCHISULTRA false should be 40MByte/s.  So, we set both to -         * 40MByte/s and the lower speeds be damned.  People will have -         * to select around the conversely mapped lower speeds in order -         * to select lower speeds on these boards. 
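The compromise described above is mechanical: in the new EEPROM format on non-Ultra2 parts, a CFXFER field of 0x03 is reinterpreted as Ultra at full speed. The remap the next two if-blocks perform, as a sketch over the driver's flag macros (remap_ultra_flags is an invented name):

  /* New-format remap for non-Ultra2 adapters: CFXFER == 0x03 together
   * with CFNEWULTRAFORMAT means "treat as Ultra", so the rate field is
   * cleared and CFSYNCHISULTRA set instead. */
  static unsigned short remap_ultra_flags(unsigned short device_flags)
  {
    if ((device_flags & CFNEWULTRAFORMAT) &&
        ((device_flags & CFXFER) == 0x03))
    {
      device_flags &= ~CFXFER;
      device_flags |= CFSYNCHISULTRA;
    }
    return device_flags;
  }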
-         */ -        if ( (sc->device_flags[i] & CFNEWULTRAFORMAT) && -            ((sc->device_flags[i] & CFXFER) == 0x03) ) -        { -          sc->device_flags[i] &= ~CFXFER; -          sc->device_flags[i] |= CFSYNCHISULTRA; -        } -        if (sc->device_flags[i] & CFSYNCHISULTRA) -        { -          p->ultraenb |= mask; -        } -      } -      else if ( !(sc->device_flags[i] & CFNEWULTRAFORMAT) && -                 (p->features & AHC_ULTRA2) && -		 (sc->device_flags[i] & CFSYNCHISULTRA) ) -      { -        p->ultraenb |= mask; -      } -    } -    else if (sc->adapter_control & CFULTRAEN) -    { -      p->ultraenb |= mask; -    } -    if ( (sc->device_flags[i] & CFSYNCH) == 0) -    { -      sc->device_flags[i] &= ~CFXFER; -      p->ultraenb &= ~mask; -      p->user[i].offset = 0; -      p->user[i].period = 0; -      p->user[i].options = 0; -    } -    else -    { -      if (p->features & AHC_ULTRA3) -      { -        p->user[i].offset = MAX_OFFSET_ULTRA2; -        if( (sc->device_flags[i] & CFXFER) < 0x03 ) -        { -          scsirate = (sc->device_flags[i] & CFXFER); -          p->user[i].options = MSG_EXT_PPR_OPTION_DT_CRC; -        } -        else -        { -          scsirate = (sc->device_flags[i] & CFXFER) | -                     ((p->ultraenb & mask) ? 0x18 : 0x10); -          p->user[i].options = 0; -        } -        p->user[i].period = aic7xxx_find_period(p, scsirate, -                                       AHC_SYNCRATE_ULTRA3); -      } -      else if (p->features & AHC_ULTRA2) -      { -        p->user[i].offset = MAX_OFFSET_ULTRA2; -        scsirate = (sc->device_flags[i] & CFXFER) | -                   ((p->ultraenb & mask) ? 0x18 : 0x10); -        p->user[i].options = 0; -        p->user[i].period = aic7xxx_find_period(p, scsirate, -                                       AHC_SYNCRATE_ULTRA2); -      } -      else -      { -        scsirate = (sc->device_flags[i] & CFXFER) << 4; -        p->user[i].options = 0; -        p->user[i].offset = MAX_OFFSET_8BIT; -        if (p->features & AHC_ULTRA) -        { -          short ultraenb; -          ultraenb = aic_inb(p, ULTRA_ENB) | -            (aic_inb(p, ULTRA_ENB + 1) << 8); -          p->user[i].period = aic7xxx_find_period(p, scsirate, -                                          (p->ultraenb & mask) ? -                                          AHC_SYNCRATE_ULTRA : -                                          AHC_SYNCRATE_FAST); -        } -        else -          p->user[i].period = aic7xxx_find_period(p, scsirate, -			  		  AHC_SYNCRATE_FAST); -      } -    } -    if ( (sc->device_flags[i] & CFWIDEB) && (p->features & AHC_WIDE) ) -    { -      p->user[i].width = MSG_EXT_WDTR_BUS_16_BIT; -    } -    else -    { -      p->user[i].width = MSG_EXT_WDTR_BUS_8_BIT; -    } -  } -  aic_outb(p, ~(p->discenable & 0xFF), DISC_DSB); -  aic_outb(p, ~((p->discenable >> 8) & 0xFF), DISC_DSB + 1); - -  /* -   * We set the p->ultraenb from the SEEPROM to begin with, but now we make -   * it match what is already down in the card.  If we are doing a reset -   * on the card then this will get put back to a default state anyway. -   * This allows us to not have to pre-emptively negotiate when using the -   * no_reset option. 
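For reference, the Ultra2 SCSIRATE value assembled in the branch above packs the EEPROM's CFXFER rate code with an enable nibble: 0x18 when Ultra is enabled for the target, 0x10 otherwise. As a one-line sketch (ultra2_scsirate is an invented name; values taken from the expressions above):

  /* Compose the Ultra2 SCSIRATE byte: low bits carry the EEPROM
   * CFXFER rate code, 0x18/0x10 select Ultra vs. plain sync. */
  static unsigned char ultra2_scsirate(unsigned short device_flags,
                                       int ultra_enabled)
  {
    return (device_flags & CFXFER) | (ultra_enabled ? 0x18 : 0x10);
  }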
-   */ -  if (p->features & AHC_ULTRA) -    p->ultraenb = aic_inb(p, ULTRA_ENB) | (aic_inb(p, ULTRA_ENB + 1) << 8); - -   -  scsi_conf = (p->scsi_id & HSCSIID); - -  if(have_seeprom) -  { -    p->adapter_control = sc->adapter_control; -    p->bios_control = sc->bios_control; - -    switch (p->chip & AHC_CHIPID_MASK) -    { -      case AHC_AIC7895: -      case AHC_AIC7896: -      case AHC_AIC7899: -        if (p->adapter_control & CFBPRIMARY) -          p->flags |= AHC_CHANNEL_B_PRIMARY; -      default: -        break; -    } - -    if (sc->adapter_control & CFSPARITY) -      scsi_conf |= ENSPCHK; -  } -  else -  { -    scsi_conf |= ENSPCHK | RESET_SCSI; -  } - -  /* -   * Only set the SCSICONF and SCSICONF + 1 registers if we are a PCI card. -   * The 2842 and 2742 cards already have these registers set and we don't -   * want to muck with them since we don't set all the bits they do. -   */ -  if ( (p->chip & ~AHC_CHIPID_MASK) == AHC_PCI ) -  { -    /* Set the host ID */ -    aic_outb(p, scsi_conf, SCSICONF); -    /* In case we are a wide card */ -    aic_outb(p, p->scsi_id, SCSICONF + 1); -  } -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_configure_bugs - * - * Description: - *   Take the card passed in and set the appropriate bug flags based upon - *   the card model.  Also make any changes needed to device registers or - *   PCI registers while we are here. - *-F*************************************************************************/ -static void -aic7xxx_configure_bugs(struct aic7xxx_host *p) -{ -  unsigned short tmp_word; -  -  switch(p->chip & AHC_CHIPID_MASK) -  { -    case AHC_AIC7860: -      p->bugs |= AHC_BUG_PCI_2_1_RETRY; -      /* fall through */ -    case AHC_AIC7850: -    case AHC_AIC7870: -      p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI; -      break; -    case AHC_AIC7880: -      p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_PCI_2_1_RETRY | -                 AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI; -      break; -    case AHC_AIC7890: -      p->bugs |= AHC_BUG_AUTOFLUSH | AHC_BUG_CACHETHEN; -      break; -    case AHC_AIC7892: -      p->bugs |= AHC_BUG_SCBCHAN_UPLOAD; -      break; -    case AHC_AIC7895: -      p->bugs |= AHC_BUG_TMODE_WIDEODD | AHC_BUG_PCI_2_1_RETRY | -                 AHC_BUG_CACHETHEN | AHC_BUG_PCI_MWI; -      break; -    case AHC_AIC7896: -      p->bugs |= AHC_BUG_CACHETHEN_DIS; -      break; -    case AHC_AIC7899: -      p->bugs |= AHC_BUG_SCBCHAN_UPLOAD; -      break; -    default: -      /* Nothing to do */ -      break; -  } - -  /* -   * Now handle the bugs that require PCI register or card register tweaks -   */ -  pci_read_config_word(p->pdev, PCI_COMMAND, &tmp_word); -  if(p->bugs & AHC_BUG_PCI_MWI) -  { -    tmp_word &= ~PCI_COMMAND_INVALIDATE; -  } -  else -  { -    tmp_word |= PCI_COMMAND_INVALIDATE; -  } -  pci_write_config_word(p->pdev, PCI_COMMAND, tmp_word); - -  if(p->bugs & AHC_BUG_CACHETHEN) -  { -    aic_outb(p, aic_inb(p, DSCOMMAND0) & ~CACHETHEN, DSCOMMAND0); -  } -  else if (p->bugs & AHC_BUG_CACHETHEN_DIS) -  { -    aic_outb(p, aic_inb(p, DSCOMMAND0) | CACHETHEN, DSCOMMAND0); -  } - -  return; -} - - -/*+F************************************************************************* - * Function: - *   aic7xxx_detect - * - * Description: - *   Try to detect and register an Adaptec 7770 or 7870 SCSI controller. - * - * XXX - This should really be called aic7xxx_probe().  
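An aside on the MWI handling in aic7xxx_configure_bugs() above: the open-coded PCI_COMMAND read-modify-write could also be expressed through the kernel's generic pci_set_mwi()/pci_clear_mwi() helpers. A hedged sketch of that alternative, not what this driver actually does:

    /* Illustrative only: the AHC_BUG_PCI_MWI quirk via generic helpers. */
    static void ahc_fixup_mwi(struct pci_dev *pdev, unsigned long bugs)
    {
      if (bugs & AHC_BUG_PCI_MWI)
        pci_clear_mwi(pdev);                /* chip can't do MWI safely */
      else if (pci_set_mwi(pdev) != 0)
        dev_warn(&pdev->dev, "could not enable MWI\n");
    }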
A sequence of - *       probe(), attach()/detach(), and init() makes more sense than - *       one do-it-all function.  This may be useful when (and if) the - *       mid-level SCSI code is overhauled. - *-F*************************************************************************/ -static int -aic7xxx_detect(struct scsi_host_template *template) -{ -  struct aic7xxx_host *temp_p = NULL; -  struct aic7xxx_host *current_p = NULL; -  struct aic7xxx_host *list_p = NULL; -  int found = 0; -#if defined(__i386__) || defined(__alpha__) -  ahc_flag_type flags = 0; -  int type; -#endif -  unsigned char sxfrctl1; -#if defined(__i386__) || defined(__alpha__) -  unsigned char hcntrl, hostconf; -  unsigned int slot, base; -#endif - -#ifdef MODULE -  /* -   * If we are called as a module, the aic7xxx pointer may not be null -   * and it would point to our bootup string, just like on the lilo -   * command line.  IF not NULL, then process this config string with -   * aic7xxx_setup -   */ -  if(aic7xxx) -    aic7xxx_setup(aic7xxx); -#endif - -  template->proc_name = "aic7xxx"; -  template->sg_tablesize = AIC7XXX_MAX_SG; - - -#ifdef CONFIG_PCI -  /* -   * PCI-bus probe. -   */ -  { -    static struct -    { -      unsigned short      vendor_id; -      unsigned short      device_id; -      ahc_chip            chip; -      ahc_flag_type       flags; -      ahc_feature         features; -      int                 board_name_index; -      unsigned short      seeprom_size; -      unsigned short      seeprom_type; -    } const aic_pdevs[] = { -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7810, AHC_NONE, -       AHC_FNONE, AHC_FENONE,                                1, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7850, AHC_AIC7850, -       AHC_PAGESCBS, AHC_AIC7850_FE,                         5, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7855, AHC_AIC7850, -       AHC_PAGESCBS, AHC_AIC7850_FE,                         6, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7821, AHC_AIC7860, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7860_FE,                                       7, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_3860, AHC_AIC7860, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7860_FE,                                       7, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_38602, AHC_AIC7860, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7860_FE,                                       7, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_38602, AHC_AIC7860, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7860_FE,                                       7, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7860, AHC_AIC7860, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MOTHERBOARD, -       AHC_AIC7860_FE,                                       7, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7861, AHC_AIC7860, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7860_FE,                                       8, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7870, AHC_AIC7870, -       AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MOTHERBOARD, -       AHC_AIC7870_FE,                                       9, -       32, C46 }, -      
{PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7871, AHC_AIC7870, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE,     10, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7872, AHC_AIC7870, -       AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7870_FE,                                      11, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7873, AHC_AIC7870, -       AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7870_FE,                                      12, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7874, AHC_AIC7870, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7870_FE,     13, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7880, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MOTHERBOARD, -       AHC_AIC7880_FE,                                      14, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7881, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE,     15, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7882, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7880_FE,                                      16, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7883, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7880_FE,                                      17, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7884, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE,     18, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7885, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE,     18, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7886, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE,     18, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7887, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE | AHC_NEW_AUTOTERM, 19, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7888, AHC_AIC7880, -       AHC_PAGESCBS | AHC_BIOS_ENABLED, AHC_AIC7880_FE,     18, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_7895, AHC_AIC7895, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7895_FE,                                      20, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890, AHC_AIC7890, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7890_FE,                                      21, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7890B, AHC_AIC7890, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7890_FE,                                      21, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2930U2, AHC_AIC7890, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7890_FE,                                      22, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_2940U2, AHC_AIC7890, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7890_FE,                                      23, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7896, AHC_AIC7896, -       
AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7896_FE,                                      24, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3940U2, AHC_AIC7896, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7896_FE,                                      25, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_3950U2D, AHC_AIC7896, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7896_FE,                                      26, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC, PCI_DEVICE_ID_ADAPTEC_1480A, AHC_AIC7860, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_NO_STPWEN, -       AHC_AIC7860_FE,                                      27, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892A, AHC_AIC7892, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7892_FE,                                      28, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892B, AHC_AIC7892, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7892_FE,                                      28, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892D, AHC_AIC7892, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7892_FE,                                      28, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7892P, AHC_AIC7892, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED, -       AHC_AIC7892_FE,                                      28, -       32, C46 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899A, AHC_AIC7899, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7899_FE,                                      29, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899B, AHC_AIC7899, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7899_FE,                                      29, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899D, AHC_AIC7899, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7899_FE,                                      29, -       32, C56_66 }, -      {PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_7899P, AHC_AIC7899, -       AHC_PAGESCBS | AHC_NEWEEPROM_FMT | AHC_BIOS_ENABLED | AHC_MULTI_CHANNEL, -       AHC_AIC7899_FE,                                      29, -       32, C56_66 }, -    }; - -    unsigned short command; -    unsigned int  devconfig, i, oldverbose; -    struct pci_dev *pdev = NULL; - -    for (i = 0; i < ARRAY_SIZE(aic_pdevs); i++) -    { -      pdev = NULL; -      while ((pdev = pci_get_device(aic_pdevs[i].vendor_id, -                                     aic_pdevs[i].device_id, -                                     pdev))) { -	if (pci_enable_device(pdev)) -		continue; -        if ( i == 0 ) /* We found one, but it's the 7810 RAID cont. 
*/ -        { -          if (aic7xxx_verbose & (VERBOSE_PROBE|VERBOSE_PROBE2)) -          { -            printk(KERN_INFO "aic7xxx: The 7810 RAID controller is not " -              "supported by\n"); -            printk(KERN_INFO "         this driver, we are ignoring it.\n"); -          } -        } -        else if ( (temp_p = kzalloc(sizeof(struct aic7xxx_host), -                                    GFP_ATOMIC)) != NULL ) -        { -          temp_p->chip = aic_pdevs[i].chip | AHC_PCI; -          temp_p->flags = aic_pdevs[i].flags; -          temp_p->features = aic_pdevs[i].features; -          temp_p->board_name_index = aic_pdevs[i].board_name_index; -          temp_p->sc_size = aic_pdevs[i].seeprom_size; -          temp_p->sc_type = aic_pdevs[i].seeprom_type; - -          /* -           * Read sundry information from PCI BIOS. -           */ -          temp_p->irq = pdev->irq; -          temp_p->pdev = pdev; -          temp_p->pci_bus = pdev->bus->number; -          temp_p->pci_device_fn = pdev->devfn; -          temp_p->base = pci_resource_start(pdev, 0); -          temp_p->mbase = pci_resource_start(pdev, 1); -          current_p = list_p; -	  while(current_p && temp_p) -	  { -	    if ( ((current_p->pci_bus == temp_p->pci_bus) && -	          (current_p->pci_device_fn == temp_p->pci_device_fn)) || -                 (temp_p->base && (current_p->base == temp_p->base)) || -                 (temp_p->mbase && (current_p->mbase == temp_p->mbase)) ) -	    { -              /* duplicate PCI entry, skip it */ -	      kfree(temp_p); -	      temp_p = NULL; -              continue; -	    } -	    current_p = current_p->next; -	  } -          if(pci_request_regions(temp_p->pdev, "aic7xxx")) -          { -            printk("aic7xxx: <%s> at PCI %d/%d/%d\n",  -              board_names[aic_pdevs[i].board_name_index], -              temp_p->pci_bus, -              PCI_SLOT(temp_p->pci_device_fn), -              PCI_FUNC(temp_p->pci_device_fn)); -            printk("aic7xxx: I/O ports already in use, ignoring.\n"); -            kfree(temp_p); -            continue; -          } - -          if (aic7xxx_verbose & VERBOSE_PROBE2) -            printk("aic7xxx: <%s> at PCI %d/%d\n",  -              board_names[aic_pdevs[i].board_name_index], -              PCI_SLOT(pdev->devfn), -              PCI_FUNC(pdev->devfn)); -          pci_read_config_word(pdev, PCI_COMMAND, &command); -          if (aic7xxx_verbose & VERBOSE_PROBE2) -          { -            printk("aic7xxx: Initial PCI_COMMAND value was 0x%x\n", -              (int)command); -          } -#ifdef AIC7XXX_STRICT_PCI_SETUP -          command |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | -            PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO; -#else -          command |= PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO; -#endif -          command &= ~PCI_COMMAND_INVALIDATE; -          if (aic7xxx_pci_parity == 0) -            command &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY); -          pci_write_config_word(pdev, PCI_COMMAND, command); -#ifdef AIC7XXX_STRICT_PCI_SETUP -          pci_read_config_dword(pdev, DEVCONFIG, &devconfig); -          if (aic7xxx_verbose & VERBOSE_PROBE2) -          { -            printk("aic7xxx: Initial DEVCONFIG value was 0x%x\n", devconfig); -          } -          devconfig |= 0x80000040; -          pci_write_config_dword(pdev, DEVCONFIG, devconfig); -#endif /* AIC7XXX_STRICT_PCI_SETUP */ - -          temp_p->unpause = INTEN; -          temp_p->pause = temp_p->unpause | PAUSE; -          if ( 
((temp_p->base == 0) && -                (temp_p->mbase == 0)) || -               (temp_p->irq == 0) ) -          { -            printk("aic7xxx: <%s> at PCI %d/%d/%d\n",  -              board_names[aic_pdevs[i].board_name_index], -              temp_p->pci_bus, -              PCI_SLOT(temp_p->pci_device_fn), -              PCI_FUNC(temp_p->pci_device_fn)); -            printk("aic7xxx: Controller disabled by BIOS, ignoring.\n"); -            goto skip_pci_controller; -          } - -#ifdef MMAPIO -          if ( !(temp_p->base) || !(temp_p->flags & AHC_MULTI_CHANNEL) || -               ((temp_p->chip != (AHC_AIC7870 | AHC_PCI)) && -                (temp_p->chip != (AHC_AIC7880 | AHC_PCI))) ) -          { -            temp_p->maddr = ioremap_nocache(temp_p->mbase, 256); -            if(temp_p->maddr) -            { -              /* -               * We need to check the I/O with the MMAPed address.  Some machines -               * simply fail to work with MMAPed I/O and certain controllers. -               */ -              if(aic_inb(temp_p, HCNTRL) == 0xff) -              { -                /* -                 * OK.....we failed our test....go back to programmed I/O -                 */ -                printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n",  -                  board_names[aic_pdevs[i].board_name_index], -                  temp_p->pci_bus, -                  PCI_SLOT(temp_p->pci_device_fn), -                  PCI_FUNC(temp_p->pci_device_fn)); -                printk(KERN_INFO "aic7xxx: MMAPed I/O failed, reverting to " -                                 "Programmed I/O.\n"); -                iounmap(temp_p->maddr); -                temp_p->maddr = NULL; -                if(temp_p->base == 0) -                { -                  printk("aic7xxx: <%s> at PCI %d/%d/%d\n",  -                    board_names[aic_pdevs[i].board_name_index], -                    temp_p->pci_bus, -                    PCI_SLOT(temp_p->pci_device_fn), -                    PCI_FUNC(temp_p->pci_device_fn)); -                  printk("aic7xxx: Controller disabled by BIOS, ignoring.\n"); -                  goto skip_pci_controller; -                } -              } -            } -          } -#endif - -          /* -           * We HAVE to make sure the first pause_sequencer() and all other -           * subsequent I/O that isn't PCI config space I/O takes place -           * after the MMAPed I/O region is configured and tested.  The -           * problem is the PowerPC architecture that doesn't support -           * programmed I/O at all, so we have to have the MMAP I/O set up -           * for this pause to even work on those machines. -           */ -          pause_sequencer(temp_p); - -          /* -           * Clear out any pending PCI error status messages.  Also set -           * verbose to 0 so that we don't emit strange PCI error messages -           * while cleaning out the current status bits. -           */ -          oldverbose = aic7xxx_verbose; -          aic7xxx_verbose = 0; -          aic7xxx_pci_intr(temp_p); -          aic7xxx_verbose = oldverbose; - -          temp_p->bios_address = 0; - -          /* -           * Remember how the card was setup in case there is no seeprom. 
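Backing up to the MMAPed-I/O test in the block above: it boils down to "map the window, read HCNTRL, and treat an all-ones readback as a dead mapping". A compact sketch of that pattern (the function name is illustrative; the register and kernel helper names are real):

    /* Sketch: a floating bus typically reads back 0xff, so fall back
     * to programmed I/O when the mapped HCNTRL looks like that. */
    static void __iomem *ahc_try_mmio(unsigned long mbase)
    {
      void __iomem *maddr = ioremap_nocache(mbase, 256);

      if (maddr && readb(maddr + HCNTRL) == 0xff)
      {
        iounmap(maddr);        /* MMIO unusable on this machine */
        maddr = NULL;
      }
      return maddr;
    }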
-           */ -          if (temp_p->features & AHC_ULTRA2) -            temp_p->scsi_id = aic_inb(temp_p, SCSIID_ULTRA2) & OID; -          else -            temp_p->scsi_id = aic_inb(temp_p, SCSIID) & OID; -          /* -           * Get current termination setting -           */ -          sxfrctl1 = aic_inb(temp_p, SXFRCTL1); - -          if (aic7xxx_chip_reset(temp_p) == -1) -          { -            goto skip_pci_controller; -          } -          /* -           * Very quickly put the term setting back into the register since -           * the chip reset may cause odd things to happen.  This is to keep -           * LVD busses with lots of drives from draining the power out of -           * the diffsense line before we get around to running the -           * configure_termination() function.  Also restore the STPWLEVEL -           * bit of DEVCONFIG -           */ -          aic_outb(temp_p, sxfrctl1, SXFRCTL1); -          pci_write_config_dword(temp_p->pdev, DEVCONFIG, devconfig); -          sxfrctl1 &= STPWEN; - -          /* -           * We need to set the CHNL? assignments before loading the SEEPROM -           * The 3940 and 3985 cards (original stuff, not any of the later -           * stuff) are 7870 and 7880 class chips.  The Ultra2 stuff falls -           * under 7896 and 7897.  The 7895 is in a class by itself :) -           */ -          switch (temp_p->chip & AHC_CHIPID_MASK) -          { -            case AHC_AIC7870: /* 3940 / 3985 */ -            case AHC_AIC7880: /* 3940 UW / 3985 UW */ -              if(temp_p->flags & AHC_MULTI_CHANNEL) -              { -                switch(PCI_SLOT(temp_p->pci_device_fn)) -                { -                  case 5: -                    temp_p->flags |= AHC_CHNLB; -                    break; -                  case 8: -                    temp_p->flags |= AHC_CHNLB; -                    break; -                  case 12: -                    temp_p->flags |= AHC_CHNLC; -                    break; -                  default: -                    break; -                } -              } -              break; - -            case AHC_AIC7895: /* 7895 */ -            case AHC_AIC7896: /* 7896/7 */ -            case AHC_AIC7899: /* 7899 */ -              if (PCI_FUNC(pdev->devfn) != 0) -              { -                temp_p->flags |= AHC_CHNLB; -              } -              /* -               * The 7895 is the only chipset that sets the SCBSIZE32 param -               * in the DEVCONFIG register.  The Ultra2 chipsets use -               * the DSCOMMAND0 register instead. -               */ -              if ((temp_p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) -              { -                pci_read_config_dword(pdev, DEVCONFIG, &devconfig); -                devconfig |= SCBSIZE32; -                pci_write_config_dword(pdev, DEVCONFIG, devconfig); -              } -              break; -            default: -              break; -          } - -          /* -           * Loading of the SEEPROM needs to come after we've set the flags -           * to indicate possible CHNLB and CHNLC assignments.  Otherwise, -           * on 394x and 398x cards we'll end up reading the wrong settings -           * for channels B and C. -           */ -          switch (temp_p->chip & AHC_CHIPID_MASK) -          { -            case AHC_AIC7892: -            case AHC_AIC7899: -              aic_outb(temp_p, 0, SCAMCTL); -              /* -               * Switch to the alt mode of the chip... 
-               */ -              aic_outb(temp_p, aic_inb(temp_p, SFUNCT) | ALT_MODE, SFUNCT); -              /* -               * Set our options...the last two items set our CRC after x byte -	       * count in target mode... -               */ -              aic_outb(temp_p, AUTO_MSGOUT_DE | DIS_MSGIN_DUALEDGE, OPTIONMODE); -	      aic_outb(temp_p, 0x00, 0x0b); -	      aic_outb(temp_p, 0x10, 0x0a); -              /* -               * switch back to normal mode... -               */ -              aic_outb(temp_p, aic_inb(temp_p, SFUNCT) & ~ALT_MODE, SFUNCT); -              aic_outb(temp_p, CRCVALCHKEN | CRCENDCHKEN | CRCREQCHKEN | -			       TARGCRCENDEN | TARGCRCCNTEN, -                       CRCCONTROL1); -              aic_outb(temp_p, ((aic_inb(temp_p, DSCOMMAND0) | USCBSIZE32 | -                                 MPARCKEN | CIOPARCKEN | CACHETHEN) &  -                               ~DPARCKEN), DSCOMMAND0); -              aic7xxx_load_seeprom(temp_p, &sxfrctl1); -              break; -            case AHC_AIC7890: -            case AHC_AIC7896: -              aic_outb(temp_p, 0, SCAMCTL); -              aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) | -                                CACHETHEN | MPARCKEN | USCBSIZE32 | -                                CIOPARCKEN) & ~DPARCKEN, DSCOMMAND0); -              aic7xxx_load_seeprom(temp_p, &sxfrctl1); -              break; -            case AHC_AIC7850: -            case AHC_AIC7860: -              /* -               * Set the DSCOMMAND0 register on these cards different from -               * on the 789x cards.  Also, read the SEEPROM as well. -               */ -              aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) | -                                CACHETHEN | MPARCKEN) & ~DPARCKEN, -                       DSCOMMAND0); -              /* FALLTHROUGH */ -            default: -              aic7xxx_load_seeprom(temp_p, &sxfrctl1); -              break; -            case AHC_AIC7880: -              /* -               * Check the rev of the chipset before we change DSCOMMAND0 -               */ -              pci_read_config_dword(pdev, DEVCONFIG, &devconfig); -              if ((devconfig & 0xff) >= 1) -              { -                aic_outb(temp_p, (aic_inb(temp_p, DSCOMMAND0) | -                                  CACHETHEN | MPARCKEN) & ~DPARCKEN, -                         DSCOMMAND0); -              } -              aic7xxx_load_seeprom(temp_p, &sxfrctl1); -              break; -          } -           - -          /* -           * and then we need another switch based on the type in order to -           * make sure the channel B primary flag is set properly on 7895 -           * controllers....Arrrgggghhh!!!  We also have to catch the fact -           * that when you disable the BIOS on the 7895 on the Intel DK440LX -           * motherboard, and possibly others, it only sets the BIOS disabled -           * bit on the A channel...I think I'm starting to lean towards -           * going postal.... 
-           */ -          switch(temp_p->chip & AHC_CHIPID_MASK) -          { -            case AHC_AIC7895: -            case AHC_AIC7896: -            case AHC_AIC7899: -              current_p = list_p; -              while(current_p != NULL) -              { -                if ( (current_p->pci_bus == temp_p->pci_bus) && -                     (PCI_SLOT(current_p->pci_device_fn) == -                      PCI_SLOT(temp_p->pci_device_fn)) ) -                { -                  if ( PCI_FUNC(current_p->pci_device_fn) == 0 ) -                  { -                    temp_p->flags |=  -                      (current_p->flags & AHC_CHANNEL_B_PRIMARY); -                    temp_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS); -                    temp_p->flags |= -                      (current_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS)); -                  } -                  else -                  { -                    current_p->flags |= -                      (temp_p->flags & AHC_CHANNEL_B_PRIMARY); -                    current_p->flags &= ~(AHC_BIOS_ENABLED|AHC_USEDEFAULTS); -                    current_p->flags |= -                      (temp_p->flags & (AHC_BIOS_ENABLED|AHC_USEDEFAULTS)); -                  } -                } -                current_p = current_p->next; -              } -              break; -            default: -              break; -          } - -          /* -           * We only support external SCB RAM on the 7895/6/7 chipsets. -           * We could support it on the 7890/1 easy enough, but I don't -           * know of any 7890/1 based cards that have it.  I do know -           * of 7895/6/7 cards that have it and they work properly. -           */ -          switch(temp_p->chip & AHC_CHIPID_MASK) -          { -            default: -              break; -            case AHC_AIC7895: -            case AHC_AIC7896: -            case AHC_AIC7899: -              pci_read_config_dword(pdev, DEVCONFIG, &devconfig); -              if (temp_p->features & AHC_ULTRA2) -              { -                if ( (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2) && -                     (aic7xxx_scbram) ) -                { -                  aic_outb(temp_p, -                           aic_inb(temp_p, DSCOMMAND0) & ~SCBRAMSEL_ULTRA2, -                           DSCOMMAND0); -                  temp_p->flags |= AHC_EXTERNAL_SRAM; -                  devconfig |= EXTSCBPEN; -                } -                else if (aic_inb(temp_p, DSCOMMAND0) & RAMPSM_ULTRA2) -                { -                  printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n",  -                    board_names[aic_pdevs[i].board_name_index], -                    temp_p->pci_bus, -                    PCI_SLOT(temp_p->pci_device_fn), -                    PCI_FUNC(temp_p->pci_device_fn)); -                  printk("aic7xxx: external SCB RAM detected, " -                         "but not enabled\n"); -                } -              } -              else -              { -                if ((devconfig & RAMPSM) && (aic7xxx_scbram)) -                { -                  devconfig &= ~SCBRAMSEL; -                  devconfig |= EXTSCBPEN; -                  temp_p->flags |= AHC_EXTERNAL_SRAM; -                } -                else if (devconfig & RAMPSM) -                { -                  printk(KERN_INFO "aic7xxx: <%s> at PCI %d/%d/%d\n",  -                    board_names[aic_pdevs[i].board_name_index], -                    temp_p->pci_bus, -                    PCI_SLOT(temp_p->pci_device_fn), -    
                PCI_FUNC(temp_p->pci_device_fn)); -                  printk("aic7xxx: external SCB RAM detected, " -                         "but not enabled\n"); -                } -              } -              pci_write_config_dword(pdev, DEVCONFIG, devconfig); -              if ( (temp_p->flags & AHC_EXTERNAL_SRAM) && -                   (temp_p->flags & AHC_CHNLB) ) -                aic_outb(temp_p, 1, CCSCBBADDR); -              break; -          } - -          /* -           * Take the LED out of diagnostic mode -           */ -          aic_outb(temp_p,  -            (aic_inb(temp_p, SBLKCTL) & ~(DIAGLEDEN | DIAGLEDON)), -            SBLKCTL); - -          /* -           * We don't know where this is set in the SEEPROM or by the -           * BIOS, so we default to 100%.  On Ultra2 controllers, use 75% -           * instead. -           */ -          if (temp_p->features & AHC_ULTRA2) -          { -            aic_outb(temp_p, RD_DFTHRSH_MAX | WR_DFTHRSH_MAX, DFF_THRSH); -          } -          else -          { -            aic_outb(temp_p, DFTHRSH_100, DSPCISTATUS); -          } - -          /* -           * Call our function to fixup any bugs that exist on this chipset. -           * This may muck with PCI settings and other device settings, so -           * make sure it's after all the other PCI and device register -           * tweaks so it can back out bad settings on specific broken cards. -           */ -          aic7xxx_configure_bugs(temp_p); - -          /* Hold a pci device reference */ -          pci_dev_get(temp_p->pdev); - -          if ( list_p == NULL ) -          { -            list_p = current_p = temp_p; -          } -          else -          { -            current_p = list_p; -            while(current_p->next != NULL) -              current_p = current_p->next; -            current_p->next = temp_p; -          } -          temp_p->next = NULL; -          found++; -	  continue; -skip_pci_controller: -#ifdef CONFIG_PCI -	  pci_release_regions(temp_p->pdev); -#endif -	  kfree(temp_p); -        }  /* Found an Adaptec PCI device. */ -        else /* Well, we found one, but we couldn't get any memory */ -        { -          printk("aic7xxx: Found <%s>\n",  -            board_names[aic_pdevs[i].board_name_index]); -          printk(KERN_INFO "aic7xxx: Unable to allocate device memory, " -            "skipping.\n"); -        } -      } /* while(pdev=....) */ -    } /* for PCI_DEVICES */ -  } -#endif /* CONFIG_PCI */ - -#if defined(__i386__) || defined(__alpha__) -  /* -   * EISA/VL-bus card signature probe. -   */ -  slot = MINSLOT; -  while ( (slot <= MAXSLOT) && -         !(aic7xxx_no_probe) ) -  { -    base = SLOTBASE(slot) + MINREG; - -    if (!request_region(base, MAXREG - MINREG, "aic7xxx")) -    { -      /* -       * Some other driver has staked a -       * claim to this i/o region already. -       */ -      slot++; -      continue; /* back to the beginning of the for loop */ -    } -    flags = 0; -    type = aic7xxx_probe(slot, base + AHC_HID0, &flags); -    if (type == -1) -    { -      release_region(base, MAXREG - MINREG); -      slot++; -      continue; -    } -    temp_p = kmalloc(sizeof(struct aic7xxx_host), GFP_ATOMIC); -    if (temp_p == NULL) -    { -      printk(KERN_WARNING "aic7xxx: Unable to allocate device space.\n"); -      release_region(base, MAXREG - MINREG); -      slot++; -      continue; /* back to the beginning of the while loop */ -    } - -    /* -     * Pause the card preserving the IRQ type.  
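The slot loop above follows a claim-then-probe discipline: reserve the slot's I/O window before touching it, and release it on every failure path. Distilled to its skeleton (illustrative, reusing the loop's own names):

    for (slot = MINSLOT; slot <= MAXSLOT; slot++)
    {
      base = SLOTBASE(slot) + MINREG;
      if (!request_region(base, MAXREG - MINREG, "aic7xxx"))
        continue;                               /* ports already claimed */
      if (aic7xxx_probe(slot, base + AHC_HID0, &flags) == -1)
      {
        release_region(base, MAXREG - MINREG);  /* no adapter signature  */
        continue;
      }
      /* ... allocate and initialize the host structure ... */
    }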
Allow the operator -     * to override the IRQ trigger. -     */ -    if (aic7xxx_irq_trigger == 1) -      hcntrl = IRQMS;  /* Level */ -    else if (aic7xxx_irq_trigger == 0) -      hcntrl = 0;  /* Edge */ -    else -      hcntrl = inb(base + HCNTRL) & IRQMS;  /* Default */ -    memset(temp_p, 0, sizeof(struct aic7xxx_host)); -    temp_p->unpause = hcntrl | INTEN; -    temp_p->pause = hcntrl | PAUSE | INTEN; -    temp_p->base = base; -    temp_p->mbase = 0; -    temp_p->maddr = NULL; -    temp_p->pci_bus = 0; -    temp_p->pci_device_fn = slot; -    aic_outb(temp_p, hcntrl | PAUSE, HCNTRL); -    while( (aic_inb(temp_p, HCNTRL) & PAUSE) == 0 ) ; -    if (aic7xxx_chip_reset(temp_p) == -1) -      temp_p->irq = 0; -    else -      temp_p->irq = aic_inb(temp_p, INTDEF) & 0x0F; -    temp_p->flags |= AHC_PAGESCBS; - -    switch (temp_p->irq) -    { -      case 9: -      case 10: -      case 11: -      case 12: -      case 14: -      case 15: -        break; - -      default: -        printk(KERN_WARNING "aic7xxx: Host adapter uses unsupported IRQ " -          "level %d, ignoring.\n", temp_p->irq); -        kfree(temp_p); -        release_region(base, MAXREG - MINREG); -        slot++; -        continue; /* back to the beginning of the while loop */ -    } - -    /* -     * We are committed now, everything has been checked and this card -     * has been found, now we just set it up -     */ - -    /* -     * Insert our new struct into the list at the end -     */ -    if (list_p == NULL) -    { -      list_p = current_p = temp_p; -    } -    else -    { -      current_p = list_p; -      while (current_p->next != NULL) -        current_p = current_p->next; -      current_p->next = temp_p; -    } - -    switch (type) -    { -      case 0: -        temp_p->board_name_index = 2; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk("aic7xxx: <%s> at EISA %d\n", -               board_names[2], slot); -        /* FALLTHROUGH */ -      case 1: -      { -        temp_p->chip = AHC_AIC7770 | AHC_EISA; -        temp_p->features |= AHC_AIC7770_FE; -        temp_p->bios_control = aic_inb(temp_p, HA_274_BIOSCTRL); - -        /* -         * Get the primary channel information.  Right now we don't -         * do anything with this, but someday we will be able to inform -         * the mid-level SCSI code which channel is primary. 
-         */ -        if (temp_p->board_name_index == 0) -        { -          temp_p->board_name_index = 3; -          if (aic7xxx_verbose & VERBOSE_PROBE2) -            printk("aic7xxx: <%s> at EISA %d\n", -                 board_names[3], slot); -        } -        if (temp_p->bios_control & CHANNEL_B_PRIMARY) -        { -          temp_p->flags |= AHC_CHANNEL_B_PRIMARY; -        } - -        if ((temp_p->bios_control & BIOSMODE) == BIOSDISABLED) -        { -          temp_p->flags &= ~AHC_BIOS_ENABLED; -        } -        else -        { -          temp_p->flags &= ~AHC_USEDEFAULTS; -          temp_p->flags |= AHC_BIOS_ENABLED; -          if ( (temp_p->bios_control & 0x20) == 0 ) -          { -            temp_p->bios_address = 0xcc000; -            temp_p->bios_address += (0x4000 * (temp_p->bios_control & 0x07)); -          } -          else -          { -            temp_p->bios_address = 0xd0000; -            temp_p->bios_address += (0x8000 * (temp_p->bios_control & 0x06)); -          } -        } -        temp_p->adapter_control = aic_inb(temp_p, SCSICONF) << 8; -        temp_p->adapter_control |= aic_inb(temp_p, SCSICONF + 1); -        if (temp_p->features & AHC_WIDE) -        { -          temp_p->scsi_id = temp_p->adapter_control & HWSCSIID; -          temp_p->scsi_id_b = temp_p->scsi_id; -        } -        else -        { -          temp_p->scsi_id = (temp_p->adapter_control >> 8) & HSCSIID; -          temp_p->scsi_id_b = temp_p->adapter_control & HSCSIID; -        } -        aic7xxx_load_seeprom(temp_p, &sxfrctl1); -        break; -      } - -      case 2: -      case 3: -        temp_p->chip = AHC_AIC7770 | AHC_VL; -        temp_p->features |= AHC_AIC7770_FE; -        if (type == 2) -          temp_p->flags |= AHC_BIOS_ENABLED; -        else -          temp_p->flags &= ~AHC_BIOS_ENABLED; -        if (aic_inb(temp_p, SCSICONF) & TERM_ENB) -          sxfrctl1 = STPWEN; -        aic7xxx_load_seeprom(temp_p, &sxfrctl1); -        temp_p->board_name_index = 4; -        if (aic7xxx_verbose & VERBOSE_PROBE2) -          printk("aic7xxx: <%s> at VLB %d\n", -               board_names[2], slot); -        switch( aic_inb(temp_p, STATUS_2840) & BIOS_SEL ) -        { -          case 0x00: -            temp_p->bios_address = 0xe0000; -            break; -          case 0x20: -            temp_p->bios_address = 0xc8000; -            break; -          case 0x40: -            temp_p->bios_address = 0xd0000; -            break; -          case 0x60: -            temp_p->bios_address = 0xd8000; -            break; -          default: -            break; /* can't get here */ -        } -        break; - -      default:  /* Won't get here. */ -        break; -    } -    if (aic7xxx_verbose & VERBOSE_PROBE2) -    { -      printk(KERN_INFO "aic7xxx: BIOS %sabled, IO Port 0x%lx, IRQ %d (%s)\n", -        (temp_p->flags & AHC_USEDEFAULTS) ? "dis" : "en", temp_p->base, -        temp_p->irq, -        (temp_p->pause & IRQMS) ? "level sensitive" : "edge triggered"); -      printk(KERN_INFO "aic7xxx: Extended translation %sabled.\n", -             (temp_p->flags & AHC_EXTEND_TRANS_A) ? "en" : "dis"); -    } - -    /* -     * All the 7770 based chipsets have this bug -     */ -    temp_p->bugs |= AHC_BUG_TMODE_WIDEODD; - -    /* -     * Set the FIFO threshold and the bus off time. 
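For concreteness, a worked example of the EISA BIOS-address decode a few lines up, assuming the BIOS-enabled path is taken (the value is chosen for illustration):

    /* Example: bios_control = 0x23.  Bit 0x20 is set, so the base is
     * 0xd0000 and the offset is 0x8000 * (0x23 & 0x06) = 0x8000 * 2
     * = 0x10000, giving a BIOS address of 0xd0000 + 0x10000 = 0xe0000. */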
-     */ -    hostconf = aic_inb(temp_p, HOSTCONF); -    aic_outb(temp_p, hostconf & DFTHRSH, BUSSPD); -    aic_outb(temp_p, (hostconf << 2) & BOFF, BUSTIME); -    slot++; -    found++; -  } - -#endif /* defined(__i386__) || defined(__alpha__) */ - -  /* -   * Now, we re-order the probed devices by BIOS address and BUS class. -   * In general, we follow this algorithm to make the adapters show up -   * in the same order under linux that the computer finds them. -   *  1: All VLB/EISA cards with BIOS_ENABLED first, according to BIOS -   *     address, going from lowest to highest. -   *  2: All PCI controllers with BIOS_ENABLED next, according to BIOS -   *     address, going from lowest to highest. -   *  3: Remaining VLB/EISA controllers going in slot order. -   *  4: Remaining PCI controllers, going in PCI device order (reversible) -   */ - -  { -    struct aic7xxx_host *sort_list[4] = { NULL, NULL, NULL, NULL }; -    struct aic7xxx_host *vlb, *pci; -    struct aic7xxx_host *prev_p; -    struct aic7xxx_host *p; -    unsigned char left; - -    prev_p = vlb = pci = NULL; - -    temp_p = list_p; -    while (temp_p != NULL) -    { -      switch(temp_p->chip & ~AHC_CHIPID_MASK) -      { -        case AHC_EISA: -        case AHC_VL: -        { -          p = temp_p; -          if (p->flags & AHC_BIOS_ENABLED) -            vlb = sort_list[0]; -          else -            vlb = sort_list[2]; - -          if (vlb == NULL) -          { -            vlb = temp_p; -            temp_p = temp_p->next; -            vlb->next = NULL; -          } -          else -          { -            current_p = vlb; -            prev_p = NULL; -            while ( (current_p != NULL) && -                    (current_p->bios_address < temp_p->bios_address)) -            { -              prev_p = current_p; -              current_p = current_p->next; -            } -            if (prev_p != NULL) -            { -              prev_p->next = temp_p; -              temp_p = temp_p->next; -              prev_p->next->next = current_p; -            } -            else -            { -              vlb = temp_p; -              temp_p = temp_p->next; -              vlb->next = current_p; -            } -          } -           -          if (p->flags & AHC_BIOS_ENABLED) -            sort_list[0] = vlb; -          else -            sort_list[2] = vlb; -           -          break; -        } -        default:  /* All PCI controllers fall through to default */ -        { - -          p = temp_p; -          if (p->flags & AHC_BIOS_ENABLED)  -            pci = sort_list[1]; -          else -            pci = sort_list[3]; - -          if (pci == NULL) -          { -            pci = temp_p; -            temp_p = temp_p->next; -            pci->next = NULL; -          } -          else -          { -            current_p = pci; -            prev_p = NULL; -            if (!aic7xxx_reverse_scan) -            { -              while ( (current_p != NULL) && -                      ( (PCI_SLOT(current_p->pci_device_fn) | -                        (current_p->pci_bus << 8)) <  -                        (PCI_SLOT(temp_p->pci_device_fn) | -                        (temp_p->pci_bus << 8)) ) ) -              { -                prev_p = current_p; -                current_p = current_p->next; -              } -            } -            else -            { -              while ( (current_p != NULL) && -                      ( (PCI_SLOT(current_p->pci_device_fn) | -                        (current_p->pci_bus << 8)) >  -                       
 (PCI_SLOT(temp_p->pci_device_fn) | -                        (temp_p->pci_bus << 8)) ) ) -              { -                prev_p = current_p; -                current_p = current_p->next; -              } -            } -            /* -             * Are we dealing with a 7895/6/7/9 where we need to sort the -             * channels as well, if so, the bios_address values should -             * be the same -             */ -            if ( (current_p) && (temp_p->flags & AHC_MULTI_CHANNEL) && -                 (temp_p->pci_bus == current_p->pci_bus) && -                 (PCI_SLOT(temp_p->pci_device_fn) == -                  PCI_SLOT(current_p->pci_device_fn)) ) -            { -              if (temp_p->flags & AHC_CHNLB) -              { -                if ( !(temp_p->flags & AHC_CHANNEL_B_PRIMARY) ) -                { -                  prev_p = current_p; -                  current_p = current_p->next; -                } -              } -              else -              { -                if (temp_p->flags & AHC_CHANNEL_B_PRIMARY) -                { -                  prev_p = current_p; -                  current_p = current_p->next; -                } -              } -            } -            if (prev_p != NULL) -            { -              prev_p->next = temp_p; -              temp_p = temp_p->next; -              prev_p->next->next = current_p; -            } -            else -            { -              pci = temp_p; -              temp_p = temp_p->next; -              pci->next = current_p; -            } -          } - -          if (p->flags & AHC_BIOS_ENABLED) -            sort_list[1] = pci; -          else -            sort_list[3] = pci; - -          break; -        } -      }  /* End of switch(temp_p->type) */ -    } /* End of while (temp_p != NULL) */ -    /* -     * At this point, the cards have been broken into 4 sorted lists, now -     * we run through the lists in order and register each controller -     */ -    { -      int i; -       -      left = found; -      for (i=0; i<ARRAY_SIZE(sort_list); i++) -      { -        temp_p = sort_list[i]; -        while(temp_p != NULL) -        { -          template->name = board_names[temp_p->board_name_index]; -          p = aic7xxx_alloc(template, temp_p); -          if (p != NULL) -          { -            p->instance = found - left; -            if (aic7xxx_register(template, p, (--left)) == 0) -            { -              found--; -              aic7xxx_release(p->host); -              scsi_unregister(p->host); -            } -            else if (aic7xxx_dump_card) -            { -              pause_sequencer(p); -              aic7xxx_print_card(p); -              aic7xxx_print_scratch_ram(p); -              unpause_sequencer(p, TRUE); -            } -          } -          current_p = temp_p; -          temp_p = (struct aic7xxx_host *)temp_p->next; -          kfree(current_p); -        } -      } -    } -  } -  return (found); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_buildscb - * - * Description: - *   Build a SCB. 
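Stepping back to the ordering pass that just completed above: the four-bucket scheme reduces to a two-bit classification per adapter. A sketch of that reduction (the helper is hypothetical; the flag and mask names are the driver's):

    /* Bucket 0/1: BIOS-enabled VLB-EISA / PCI, insertion-sorted by
     * BIOS address.  Bucket 2/3: the rest, kept in slot / PCI order. */
    static int ahc_sort_bucket(struct aic7xxx_host *p)
    {
      int is_pci = ((p->chip & ~AHC_CHIPID_MASK) != AHC_EISA) &&
                   ((p->chip & ~AHC_CHIPID_MASK) != AHC_VL);

      if (p->flags & AHC_BIOS_ENABLED)
        return is_pci ? 1 : 0;   /* sorted by ascending BIOS address */
      return is_pci ? 3 : 2;     /* kept in probe order              */
    }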
- *-F*************************************************************************/ -static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd, -			     struct aic7xxx_scb *scb) -{ -  unsigned short mask; -  struct aic7xxx_hwscb *hscb; -  struct aic_dev_data *aic_dev = cmd->device->hostdata; -  struct scsi_device *sdptr = cmd->device; -  unsigned char tindex = TARGET_INDEX(cmd); -  int use_sg; - -  mask = (0x01 << tindex); -  hscb = scb->hscb; - -  /* -   * Setup the control byte if we need negotiation and have not -   * already requested it. -   */ -  hscb->control = 0; -  scb->tag_action = 0; - -  if (p->discenable & mask) -  { -    hscb->control |= DISCENB; -    /* We always force TEST_UNIT_READY to untagged */ -    if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) -    { -      hscb->control |= MSG_SIMPLE_Q_TAG; -      scb->tag_action = MSG_SIMPLE_Q_TAG; -    } -  } -  if ( !(aic_dev->dtr_pending) && -        (aic_dev->needppr || aic_dev->needwdtr || aic_dev->needsdtr) && -        (aic_dev->flags & DEVICE_DTR_SCANNED) ) -  { -    aic_dev->dtr_pending = 1; -    scb->tag_action = 0; -    hscb->control &= DISCENB; -    hscb->control |= MK_MESSAGE; -    if(aic_dev->needppr) -    { -      scb->flags |= SCB_MSGOUT_PPR; -    } -    else if(aic_dev->needwdtr) -    { -      scb->flags |= SCB_MSGOUT_WDTR; -    } -    else if(aic_dev->needsdtr) -    { -      scb->flags |= SCB_MSGOUT_SDTR; -    } -    scb->flags |= SCB_DTR_SCB; -  } -  hscb->target_channel_lun = ((cmd->device->id << 4) & 0xF0) | -        ((cmd->device->channel & 0x01) << 3) | (cmd->device->lun & 0x07); - -  /* -   * The interpretation of request_buffer and request_bufflen -   * changes depending on whether or not use_sg is zero; a -   * non-zero use_sg indicates the number of elements in the -   * scatter-gather array. -   */ - -  /* -   * XXX - this relies on the host data being stored in a -   *       little-endian format. -   */ -  hscb->SCSI_cmd_length = cmd->cmd_len; -  memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len); -  hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd)); - -  use_sg = scsi_dma_map(cmd); -  BUG_ON(use_sg < 0); - -  if (use_sg) { -    struct scatterlist *sg;  /* Must be mid-level SCSI code scatterlist */ - -    /* -     * We must build an SG list in adapter format, as the kernel's SG list -     * cannot be used directly because of data field size (__alpha__) -     * differences and the kernel SG list uses virtual addresses where -     * we need physical addresses. -     */ -    int i; - -    scb->sg_length = 0; - - -    /* -     * Copy the segments into the SG array.  NOTE!!! - We used to -     * have the first entry both in the data_pointer area and the first -     * SG element.  That has changed somewhat.  We still have the first -     * entry in both places, but now we download the address of -     * scb->sg_list[1] instead of 0 to the sg pointer in the hscb. -     */ -    scsi_for_each_sg(cmd, sg, use_sg, i) { -      unsigned int len = sg_dma_len(sg); -      scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg)); -      scb->sg_list[i].length = cpu_to_le32(len); -      scb->sg_length += len; -    } -    /* Copy the first SG into the data pointer area. 
*/ -    hscb->data_pointer = scb->sg_list[0].address; -    hscb->data_count = scb->sg_list[0].length; -    scb->sg_count = i; -    hscb->SG_segment_count = i; -    hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1])); -  } else { -      scb->sg_count = 0; -      scb->sg_length = 0; -      hscb->SG_segment_count = 0; -      hscb->SG_list_pointer = 0; -      hscb->data_count = 0; -      hscb->data_pointer = 0; -  } -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_queue - * - * Description: - *   Queue a SCB to the controller. - *-F*************************************************************************/ -static int aic7xxx_queue_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) -{ -  struct aic7xxx_host *p; -  struct aic7xxx_scb *scb; -  struct aic_dev_data *aic_dev; - -  p = (struct aic7xxx_host *) cmd->device->host->hostdata; - -  aic_dev = cmd->device->hostdata;   -#ifdef AIC7XXX_VERBOSE_DEBUGGING -  if (aic_dev->active_cmds > aic_dev->max_q_depth) -  { -    printk(WARN_LEAD "Commands queued exceeds queue " -           "depth, active=%d\n", -           p->host_no, CTL_OF_CMD(cmd),  -           aic_dev->active_cmds); -  } -#endif - -  scb = scbq_remove_head(&p->scb_data->free_scbs); -  if (scb == NULL) -  { -    aic7xxx_allocate_scb(p); -    scb = scbq_remove_head(&p->scb_data->free_scbs); -    if(scb == NULL) -    { -      printk(WARN_LEAD "Couldn't get a free SCB.\n", p->host_no, -             CTL_OF_CMD(cmd)); -      return 1; -    } -  } -  scb->cmd = cmd; - -	/* -	* Make sure the scsi_cmnd pointer is saved, the struct it points to -	* is set up properly, and the parity error flag is reset, then send -	* the SCB to the sequencer and watch the fun begin. -	*/ -  aic7xxx_position(cmd) = scb->hscb->tag; -  cmd->scsi_done = fn; -  cmd->result = DID_OK; -  aic7xxx_error(cmd) = DID_OK; -  aic7xxx_status(cmd) = 0; -  cmd->host_scribble = NULL; - -  /* -   * Construct the SCB beforehand, so the sequencer is -   * paused a minimal amount of time. -   */ -  aic7xxx_buildscb(p, cmd, scb); - -  scb->flags |= SCB_ACTIVE | SCB_WAITINGQ; - -  scbq_insert_tail(&p->waiting_scbs, scb); -  aic7xxx_run_waiting_queues(p); -  return (0); -} - -static DEF_SCSI_QCMD(aic7xxx_queue) - -/*+F************************************************************************* - * Function: - *   aic7xxx_bus_device_reset - * - * Description: - *   Abort or reset the current SCSI command(s).  If the scb has not - *   previously been aborted, then we attempt to send a BUS_DEVICE_RESET - *   message to the target.  If the scb has previously been unsuccessfully - *   aborted, then we will reset the channel and have all devices renegotiate. - *   Returns an enumerated type that indicates the status of the operation. 
- *-F*************************************************************************/ -static int __aic7xxx_bus_device_reset(struct scsi_cmnd *cmd) -{ -  struct aic7xxx_host  *p; -  struct aic7xxx_scb   *scb; -  struct aic7xxx_hwscb *hscb; -  int channel; -  unsigned char saved_scbptr, lastphase; -  unsigned char hscb_index; -  int disconnected; -  struct aic_dev_data *aic_dev; - -  if(cmd == NULL) -  { -    printk(KERN_ERR "aic7xxx_bus_device_reset: called with NULL cmd!\n"); -    return FAILED; -  } -  p = (struct aic7xxx_host *)cmd->device->host->hostdata; -  aic_dev = AIC_DEV(cmd); -  if(aic7xxx_position(cmd) < p->scb_data->numscbs) -    scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]); -  else -    return FAILED; - -  hscb = scb->hscb; - -  aic7xxx_isr(p); -  aic7xxx_done_cmds_complete(p); -  /* If the command was already complete or just completed, then we didn't -   * do a reset, return FAILED */ -  if(!(scb->flags & SCB_ACTIVE)) -    return FAILED; - -  pause_sequencer(p); -  lastphase = aic_inb(p, LASTPHASE); -  if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -  { -    printk(INFO_LEAD "Bus Device reset, scb flags 0x%x, ", -         p->host_no, CTL_OF_SCB(scb), scb->flags); -    switch (lastphase) -    { -      case P_DATAOUT: -        printk("Data-Out phase\n"); -        break; -      case P_DATAIN: -        printk("Data-In phase\n"); -        break; -      case P_COMMAND: -        printk("Command phase\n"); -        break; -      case P_MESGOUT: -        printk("Message-Out phase\n"); -        break; -      case P_STATUS: -        printk("Status phase\n"); -        break; -      case P_MESGIN: -        printk("Message-In phase\n"); -        break; -      default: -      /* -       * We're not in a valid phase, so assume we're idle. -       */ -        printk("while idle, LASTPHASE = 0x%x\n", lastphase); -        break; -    } -    printk(INFO_LEAD "SCSISIGI 0x%x, SEQADDR 0x%x, SSTAT0 0x%x, SSTAT1 " -         "0x%x\n", p->host_no, CTL_OF_SCB(scb), -         aic_inb(p, SCSISIGI), -         aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), -         aic_inb(p, SSTAT0), aic_inb(p, SSTAT1)); -    printk(INFO_LEAD "SG_CACHEPTR 0x%x, SSTAT2 0x%x, STCNT 0x%x\n", p->host_no, -         CTL_OF_SCB(scb), -         (p->features & AHC_ULTRA2) ? aic_inb(p, SG_CACHEPTR) : 0, -         aic_inb(p, SSTAT2), -         aic_inb(p, STCNT + 2) << 16 | aic_inb(p, STCNT + 1) << 8 | -         aic_inb(p, STCNT)); -  } - -  channel = cmd->device->channel; - -    /* -     * Send a Device Reset Message: -     * The target that is holding up the bus may not be the same as -     * the one that triggered this timeout (different commands have -     * different timeout lengths).  Our strategy here is to queue an -     * abort message to the timed out target if it is disconnected. -     * Otherwise, if we have an active target we stuff the message buffer -     * with an abort message and assert ATN in the hopes that the target -     * will let go of the bus and go to the mesgout phase.  If this -     * fails, we'll get another timeout a few seconds later which will -     * attempt a bus reset. 
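The branches that follow implement that strategy. Reduced to a decision function (an illustrative sketch with hypothetical action names, not the driver's actual control flow):

    enum ahc_reset_action { USE_MSG_BUFFER, REQUEUE_WITH_MSG, WAIT_FOR_TIMEOUT };

    static enum ahc_reset_action
    ahc_reset_strategy(int lastphase, int connected, int disconnected)
    {
      if (lastphase != P_BUSFREE && connected)
        return USE_MSG_BUFFER;     /* stuff HOST_MSG and assert ATN   */
      if (disconnected)
        return REQUEUE_WITH_MSG;   /* set MK_MESSAGE, requeue the SCB */
      return WAIT_FOR_TIMEOUT;     /* let the next timeout escalate   */
    }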
-     */ -  saved_scbptr = aic_inb(p, SCBPTR); -  disconnected = FALSE; - -  if (lastphase != P_BUSFREE) -  { -    if (aic_inb(p, SCB_TAG) >= p->scb_data->numscbs) -    { -      printk(WARN_LEAD "Invalid SCB ID %d is active, " -             "SCB flags = 0x%x.\n", p->host_no, -            CTL_OF_CMD(cmd), scb->hscb->tag, scb->flags); -      unpause_sequencer(p, FALSE); -      return FAILED; -    } -    if (scb->hscb->tag == aic_inb(p, SCB_TAG)) -    {  -      if ( (lastphase == P_MESGOUT) || (lastphase == P_MESGIN) ) -      { -        printk(WARN_LEAD "Device reset, Message buffer " -                "in use\n", p->host_no, CTL_OF_SCB(scb)); -        unpause_sequencer(p, FALSE); -	return FAILED; -      } -	 -      if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -        printk(INFO_LEAD "Device reset message in " -              "message buffer\n", p->host_no, CTL_OF_SCB(scb)); -      scb->flags |= SCB_RESET | SCB_DEVICE_RESET; -      aic7xxx_error(cmd) = DID_RESET; -      aic_dev->flags |= BUS_DEVICE_RESET_PENDING; -      /* Send the abort message to the active SCB. */ -      aic_outb(p, HOST_MSG, MSG_OUT); -      aic_outb(p, lastphase | ATNO, SCSISIGO); -      unpause_sequencer(p, FALSE); -      spin_unlock_irq(p->host->host_lock); -      ssleep(1); -      spin_lock_irq(p->host->host_lock); -      if(aic_dev->flags & BUS_DEVICE_RESET_PENDING) -        return FAILED; -      else -        return SUCCESS; -    } -  } /* if (last_phase != P_BUSFREE).....indicates we are idle and can work */ -  /* -   * Simply set the MK_MESSAGE flag and the SEQINT handler will do -   * the rest on a reconnect/connect. -   */ -  scb->hscb->control |= MK_MESSAGE; -  scb->flags |= SCB_RESET | SCB_DEVICE_RESET; -  aic_dev->flags |= BUS_DEVICE_RESET_PENDING; -  /* -   * Check to see if the command is on the qinfifo.  If it is, then we will -   * not need to queue the command again since the card should start it soon -   */ -  if (aic7xxx_search_qinfifo(p, cmd->device->channel, cmd->device->id, cmd->device->lun, hscb->tag, -			  0, TRUE, NULL) == 0) -  { -    disconnected = TRUE; -    if ((hscb_index = aic7xxx_find_scb(p, scb)) != SCB_LIST_NULL) -    { -      unsigned char scb_control; - -      aic_outb(p, hscb_index, SCBPTR); -      scb_control = aic_inb(p, SCB_CONTROL); -      /* -       * If the DISCONNECTED bit is not set in SCB_CONTROL, then we are -       * actually on the waiting list, not disconnected, and we don't -       * need to requeue the command. -       */ -      disconnected = (scb_control & DISCONNECTED); -      aic_outb(p, scb_control | MK_MESSAGE, SCB_CONTROL); -    } -    if (disconnected) -    { -      /* -       * Actually requeue this SCB in case we can select the -       * device before it reconnects.  This can result in the command -       * being on the qinfifo twice, but we don't care because it will -       * all get cleaned up if/when the reset takes place. 
-       */ -      if (aic7xxx_verbose & VERBOSE_RESET_PROCESS) -        printk(INFO_LEAD "Queueing device reset command.\n", p->host_no, -		      CTL_OF_SCB(scb)); -      p->qinfifo[p->qinfifonext++] = scb->hscb->tag; -      if (p->features & AHC_QUEUE_REGS) -        aic_outb(p, p->qinfifonext, HNSCB_QOFF); -      else -        aic_outb(p, p->qinfifonext, KERNEL_QINPOS); -      scb->flags |= SCB_QUEUED_ABORT; -    } -  } -  aic_outb(p, saved_scbptr, SCBPTR); -  unpause_sequencer(p, FALSE); -  spin_unlock_irq(p->host->host_lock); -  msleep(1000/4); -  spin_lock_irq(p->host->host_lock); -  if(aic_dev->flags & BUS_DEVICE_RESET_PENDING) -    return FAILED; -  else -    return SUCCESS; -} - -static int aic7xxx_bus_device_reset(struct scsi_cmnd *cmd) -{ -      int rc; - -      spin_lock_irq(cmd->device->host->host_lock); -      rc = __aic7xxx_bus_device_reset(cmd); -      spin_unlock_irq(cmd->device->host->host_lock); - -      return rc; -} - - -/*+F************************************************************************* - * Function: - *   aic7xxx_panic_abort - * - * Description: - *   Abort the current SCSI command(s). - *-F*************************************************************************/ -static void aic7xxx_panic_abort(struct aic7xxx_host *p, struct scsi_cmnd *cmd) -{ - -  printk("aic7xxx driver version %s\n", AIC7XXX_C_VERSION); -  printk("Controller type:\n    %s\n", board_names[p->board_name_index]); -  printk("p->flags=0x%lx, p->chip=0x%x, p->features=0x%x, " -         "sequencer %s paused\n", -     p->flags, p->chip, p->features, -    (aic_inb(p, HCNTRL) & PAUSE) ? "is" : "isn't" ); -  pause_sequencer(p); -  disable_irq(p->irq); -  aic7xxx_print_card(p); -  aic7xxx_print_scratch_ram(p); -  spin_unlock_irq(p->host->host_lock); -  for(;;) barrier(); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_abort - * - * Description: - *   Abort the current SCSI command(s). - *-F*************************************************************************/ -static int __aic7xxx_abort(struct scsi_cmnd *cmd) -{ -  struct aic7xxx_scb  *scb = NULL; -  struct aic7xxx_host *p; -  int    found=0, disconnected; -  unsigned char saved_hscbptr, hscbptr, scb_control; -  struct aic_dev_data *aic_dev; - -  if(cmd == NULL) -  { -    printk(KERN_ERR "aic7xxx_abort: called with NULL cmd!\n"); -    return FAILED; -  } -  p = (struct aic7xxx_host *)cmd->device->host->hostdata; -  aic_dev = AIC_DEV(cmd); -  if(aic7xxx_position(cmd) < p->scb_data->numscbs) -    scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]); -  else -    return FAILED; - -  aic7xxx_isr(p); -  aic7xxx_done_cmds_complete(p); -  /* If the command was already complete or just completed, then we didn't -   * do a reset, return FAILED */ -  if(!(scb->flags & SCB_ACTIVE)) -    return FAILED; - -  pause_sequencer(p); - -  /* -   * I added a new config option to the driver: "panic_on_abort" that will -   * cause the driver to panic and the machine to stop on the first abort -   * or reset call into the driver.  At that point, it prints out a lot of -   * useful information for me which I can then use to try and debug the -   * problem.  Simply enable the boot time prompt in order to activate this -   * code. 
-   */ -  if (aic7xxx_panic_on_abort) -    aic7xxx_panic_abort(p, cmd); - -  if (aic7xxx_verbose & VERBOSE_ABORT) -  { -    printk(INFO_LEAD "Aborting scb %d, flags 0x%x, SEQADDR 0x%x, LASTPHASE " -           "0x%x\n", -         p->host_no, CTL_OF_SCB(scb), scb->hscb->tag, scb->flags, -         aic_inb(p, SEQADDR0) | (aic_inb(p, SEQADDR1) << 8), -         aic_inb(p, LASTPHASE)); -    printk(INFO_LEAD "SG_CACHEPTR 0x%x, SG_COUNT %d, SCSISIGI 0x%x\n", -         p->host_no, CTL_OF_SCB(scb), (p->features & AHC_ULTRA2) ? -         aic_inb(p, SG_CACHEPTR) : 0, aic_inb(p, SG_COUNT), -         aic_inb(p, SCSISIGI)); -    printk(INFO_LEAD "SSTAT0 0x%x, SSTAT1 0x%x, SSTAT2 0x%x\n", -         p->host_no, CTL_OF_SCB(scb), aic_inb(p, SSTAT0), -         aic_inb(p, SSTAT1), aic_inb(p, SSTAT2)); -  } - -  if (scb->flags & SCB_WAITINGQ) -  { -    if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)  -      printk(INFO_LEAD "SCB found on waiting list and " -          "aborted.\n", p->host_no, CTL_OF_SCB(scb)); -    scbq_remove(&p->waiting_scbs, scb); -    scbq_remove(&aic_dev->delayed_scbs, scb); -    aic_dev->active_cmds++; -    p->activescbs++; -    scb->flags &= ~(SCB_WAITINGQ | SCB_ACTIVE); -    scb->flags |= SCB_ABORT | SCB_QUEUED_FOR_DONE; -    goto success; -  } - -/* - *  We just checked the waiting_q, now for the QINFIFO - */ -  if ( ((found = aic7xxx_search_qinfifo(p, cmd->device->id, cmd->device->channel, -                     cmd->device->lun, scb->hscb->tag, SCB_ABORT | SCB_QUEUED_FOR_DONE, -                     FALSE, NULL)) != 0) && -                    (aic7xxx_verbose & VERBOSE_ABORT_PROCESS)) -  { -    printk(INFO_LEAD "SCB found in QINFIFO and aborted.\n", p->host_no, -		    CTL_OF_SCB(scb)); -    goto success; -  } - -/* - *  QINFIFO, waitingq, completeq done.  Next, check WAITING_SCB list in card - */ - -  saved_hscbptr = aic_inb(p, SCBPTR); -  if ((hscbptr = aic7xxx_find_scb(p, scb)) != SCB_LIST_NULL) -  { -    aic_outb(p, hscbptr, SCBPTR); -    scb_control = aic_inb(p, SCB_CONTROL); -    disconnected = scb_control & DISCONNECTED; -    /* -     * If the DISCONNECTED bit is not set in SCB_CONTROL, then we are -     * either currently active or on the waiting list. 
-     */ -    if(!disconnected && aic_inb(p, LASTPHASE) == P_BUSFREE) { -      if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS) -        printk(INFO_LEAD "SCB found on hardware waiting" -          " list and aborted.\n", p->host_no, CTL_OF_SCB(scb)); -      /* If we are the only waiting command, stop the selection engine */ -      if (aic_inb(p, WAITING_SCBH) == hscbptr && aic_inb(p, SCB_NEXT) == -			SCB_LIST_NULL) -      { -        aic_outb(p, aic_inb(p, SCSISEQ) & ~ENSELO, SCSISEQ); -        aic_outb(p, CLRSELTIMEO, CLRSINT1); -	aic_outb(p, SCB_LIST_NULL, WAITING_SCBH); -      } -      else -      { -	unsigned char prev, next; -	prev = SCB_LIST_NULL; -	next = aic_inb(p, WAITING_SCBH); -	while(next != SCB_LIST_NULL) -	{ -	  aic_outb(p, next, SCBPTR); -	  if (next == hscbptr) -	  { -	    next = aic_inb(p, SCB_NEXT); -	    if (prev != SCB_LIST_NULL) -	    { -	      aic_outb(p, prev, SCBPTR); -	      aic_outb(p, next, SCB_NEXT); -	    } -	    else -	      aic_outb(p, next, WAITING_SCBH); -	    aic_outb(p, hscbptr, SCBPTR); -	    next = SCB_LIST_NULL; -	  } -	  else -	  { -	    prev = next; -	    next = aic_inb(p, SCB_NEXT); -	  } -	} -      } -      aic_outb(p, SCB_LIST_NULL, SCB_TAG); -      aic_outb(p, 0, SCB_CONTROL); -      aic7xxx_add_curscb_to_free_list(p); -      scb->flags = SCB_ABORT | SCB_QUEUED_FOR_DONE; -      goto success; -    } -    else if (!disconnected) -    { -      /* -       * We are the currently active command -       */ -      if((aic_inb(p, LASTPHASE) == P_MESGIN) || -	 (aic_inb(p, LASTPHASE) == P_MESGOUT)) -      { -	/* -	 * Message buffer busy, unable to abort -	 */ -	printk(INFO_LEAD "message buffer busy, unable to abort.\n", -			  p->host_no, CTL_OF_SCB(scb)); -	unpause_sequencer(p, FALSE); -	return FAILED; -      } -      /* Fallthrough to below, set ATNO after we set SCB_CONTROL */ -    }  -    aic_outb(p,  scb_control | MK_MESSAGE, SCB_CONTROL); -    if(!disconnected) -    { -      aic_outb(p, HOST_MSG, MSG_OUT); -      aic_outb(p, aic_inb(p, SCSISIGI) | ATNO, SCSISIGO); -    } -    aic_outb(p, saved_hscbptr, SCBPTR); -  }  -  else -  { -    /* -     * The scb isn't in the card at all and it is active and it isn't in -     * any of the queues, so it must be disconnected and paged out.  Fall -     * through to the code below. -     */ -    disconnected = 1; -  } -         -  p->flags |= AHC_ABORT_PENDING; -  scb->flags |= SCB_QUEUED_ABORT | SCB_ABORT | SCB_RECOVERY_SCB; -  scb->hscb->control |= MK_MESSAGE; -  if(disconnected) -  { -    if (aic7xxx_verbose & VERBOSE_ABORT_PROCESS) -      printk(INFO_LEAD "SCB disconnected.  
Queueing Abort" -        " SCB.\n", p->host_no, CTL_OF_SCB(scb)); -    p->qinfifo[p->qinfifonext++] = scb->hscb->tag; -    if (p->features & AHC_QUEUE_REGS) -      aic_outb(p, p->qinfifonext, HNSCB_QOFF); -    else -      aic_outb(p, p->qinfifonext, KERNEL_QINPOS); -  } -  unpause_sequencer(p, FALSE); -  spin_unlock_irq(p->host->host_lock); -  msleep(1000/4); -  spin_lock_irq(p->host->host_lock); -  if (p->flags & AHC_ABORT_PENDING) -  { -    if (aic7xxx_verbose & VERBOSE_ABORT_RETURN) -      printk(INFO_LEAD "Abort never delivered, returning FAILED\n", p->host_no, -		    CTL_OF_CMD(cmd)); -    p->flags &= ~AHC_ABORT_PENDING; -    return FAILED; -  } -  if (aic7xxx_verbose & VERBOSE_ABORT_RETURN) -    printk(INFO_LEAD "Abort successful.\n", p->host_no, CTL_OF_CMD(cmd)); -  return SUCCESS; - -success: -  if (aic7xxx_verbose & VERBOSE_ABORT_RETURN) -    printk(INFO_LEAD "Abort successful.\n", p->host_no, CTL_OF_CMD(cmd)); -  aic7xxx_run_done_queue(p, TRUE); -  unpause_sequencer(p, FALSE); -  return SUCCESS; -} - -static int aic7xxx_abort(struct scsi_cmnd *cmd) -{ -	int rc; - -	spin_lock_irq(cmd->device->host->host_lock); -	rc = __aic7xxx_abort(cmd); -	spin_unlock_irq(cmd->device->host->host_lock); - -	return rc; -} - - -/*+F************************************************************************* - * Function: - *   aic7xxx_reset - * - * Description: - *   Resetting the bus always succeeds - is has to, otherwise the - *   kernel will panic! Try a surgical technique - sending a BUS - *   DEVICE RESET message - on the offending target before pulling - *   the SCSI bus reset line. - *-F*************************************************************************/ -static int aic7xxx_reset(struct scsi_cmnd *cmd) -{ -  struct aic7xxx_scb *scb; -  struct aic7xxx_host *p; -  struct aic_dev_data *aic_dev; - -  p = (struct aic7xxx_host *) cmd->device->host->hostdata; -  spin_lock_irq(p->host->host_lock); - -  aic_dev = AIC_DEV(cmd); -  if(aic7xxx_position(cmd) < p->scb_data->numscbs) -  { -    scb = (p->scb_data->scb_array[aic7xxx_position(cmd)]); -    if (scb->cmd != cmd) -      scb = NULL; -  } -  else -  { -    scb = NULL; -  } - -  /* -   * I added a new config option to the driver: "panic_on_abort" that will -   * cause the driver to panic and the machine to stop on the first abort -   * or reset call into the driver.  At that point, it prints out a lot of -   * useful information for me which I can then use to try and debug the -   * problem.  Simply enable the boot time prompt in order to activate this -   * code. -   */ -  if (aic7xxx_panic_on_abort) -    aic7xxx_panic_abort(p, cmd); - -  pause_sequencer(p); - -  while((aic_inb(p, INTSTAT) & INT_PEND) && !(p->flags & AHC_IN_ISR)) -  { -    aic7xxx_isr(p); -    pause_sequencer(p); -  } -  aic7xxx_done_cmds_complete(p); - -  if(scb && (scb->cmd == NULL)) -  { -    /* -     * We just completed the command when we ran the isr stuff, so we no -     * longer have it. -     */ -    unpause_sequencer(p, FALSE); -    spin_unlock_irq(p->host->host_lock); -    return SUCCESS; -  } -     -/* - *  By this point, we want to already know what we are going to do and - *  only have the following code implement our course of action. 
- */ -  aic7xxx_reset_channel(p, cmd->device->channel, TRUE); -  if (p->features & AHC_TWIN) -  { -    aic7xxx_reset_channel(p, cmd->device->channel ^ 0x01, TRUE); -    restart_sequencer(p); -  } -  aic_outb(p,  aic_inb(p, SIMODE1) & ~(ENREQINIT|ENBUSFREE), SIMODE1); -  aic7xxx_clear_intstat(p); -  p->flags &= ~AHC_HANDLING_REQINITS; -  p->msg_type = MSG_TYPE_NONE; -  p->msg_index = 0; -  p->msg_len = 0; -  aic7xxx_run_done_queue(p, TRUE); -  unpause_sequencer(p, FALSE); -  spin_unlock_irq(p->host->host_lock); -  ssleep(2); -  return SUCCESS; -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_biosparam - * - * Description: - *   Return the disk geometry for the given SCSI device. - * - * Note: - *   This function is broken for today's really large drives and needs - *   fixed. - *-F*************************************************************************/ -static int -aic7xxx_biosparam(struct scsi_device *sdev, struct block_device *bdev, -		sector_t capacity, int geom[]) -{ -  sector_t heads, sectors, cylinders; -  int ret; -  struct aic7xxx_host *p; -  unsigned char *buf; - -  p = (struct aic7xxx_host *) sdev->host->hostdata; -  buf = scsi_bios_ptable(bdev); - -  if ( buf ) -  { -    ret = scsi_partsize(buf, capacity, &geom[2], &geom[0], &geom[1]); -    kfree(buf); -    if ( ret != -1 ) -      return(ret); -  } -   -  heads = 64; -  sectors = 32; -  cylinders = capacity >> 11; - -  if ((p->flags & AHC_EXTEND_TRANS_A) && (cylinders > 1024)) -  { -    heads = 255; -    sectors = 63; -    cylinders = capacity >> 14; -    if(capacity > (65535 * heads * sectors)) -      cylinders = 65535; -    else -      cylinders = ((unsigned int)capacity) / (unsigned int)(heads * sectors); -  } - -  geom[0] = (int)heads; -  geom[1] = (int)sectors; -  geom[2] = (int)cylinders; - -  return (0); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_release - * - * Description: - *   Free the passed in Scsi_Host memory structures prior to unloading the - *   module. - *-F*************************************************************************/ -static int -aic7xxx_release(struct Scsi_Host *host) -{ -  struct aic7xxx_host *p = (struct aic7xxx_host *) host->hostdata; -  struct aic7xxx_host *next, *prev; - -  if(p->irq) -    free_irq(p->irq, p); -#ifdef MMAPIO -  if(p->maddr) -  { -    iounmap(p->maddr); -  } -#endif /* MMAPIO */ -  if(!p->pdev) -    release_region(p->base, MAXREG - MINREG); -#ifdef CONFIG_PCI -  else { -    pci_release_regions(p->pdev); -    pci_dev_put(p->pdev); -  } -#endif -  prev = NULL; -  next = first_aic7xxx; -  while(next != NULL) -  { -    if(next == p) -    { -      if(prev == NULL) -        first_aic7xxx = next->next; -      else -        prev->next = next->next; -    } -    else -    { -      prev = next; -    } -    next = next->next; -  } -  aic7xxx_free(p); -  return(0); -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_print_card - * - * Description: - *   Print out all of the control registers on the card - * - *   NOTE: This function is not yet safe for use on the VLB and EISA - *   controllers, so it isn't used on those controllers at all. 
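A worked example of the translation above (illustrative numbers): a disk of 16777216 sectors maps to 16777216 >> 11 = 8192 cylinders under the default 64-head/32-sector geometry; that exceeds the 1024-cylinder BIOS limit, so with AHC_EXTEND_TRANS_A set the code switches to 255 heads and 63 sectors, giving 16777216 / (255 * 63) = 1044 cylinders. The same heuristic as a standalone sketch:

    /* Sketch of the deleted biosparam fallback (capacity in 512-byte
     * sectors), independent of the driver structures. */
    static void legacy_chs(unsigned long capacity, int extend_trans,
                           int *heads, int *sectors, int *cylinders)
    {
            *heads = 64;
            *sectors = 32;
            *cylinders = capacity >> 11;        /* 64 * 32 = 2048 sectors/cyl */
            if (extend_trans && *cylinders > 1024) {
                    *heads = 255;
                    *sectors = 63;
                    if (capacity > 65535UL * 255 * 63)
                            *cylinders = 65535; /* clamp to the CHS maximum */
                    else
                            *cylinders = capacity / (255 * 63);
            }
    }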
- *-F*************************************************************************/ -static void -aic7xxx_print_card(struct aic7xxx_host *p) -{ -  int i, j, k, chip; -  static struct register_ranges { -    int num_ranges; -    int range_val[32]; -  } cards_ds[] = { -    { 0, {0,} }, /* none */ -    {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1f, 0x1f, 0x60, 0x60, /*7771*/ -          0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9b, 0x9f} }, -    { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7850*/ -          0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, -    { 9, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7860*/ -          0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, -    {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1c, 0x1f, 0x60, 0x60, /*7870*/ -          0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, -    {10, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1a, 0x1c, 0x1f, 0x60, 0x60, /*7880*/ -          0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9f} }, -    {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7890*/ -          0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f, -          0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc, -          0xfe, 0xff} }, -    {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x19, 0x1b, 0x1f, 0x60, 0x60, /*7895*/ -          0x62, 0x66, 0x80, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, -          0x9f, 0x9f, 0xe0, 0xf1} }, -    {16, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7896*/ -          0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9f, 0x9f, -          0xe0, 0xf1, 0xf4, 0xf4, 0xf6, 0xf6, 0xf8, 0xf8, 0xfa, 0xfc, -          0xfe, 0xff} }, -    {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7892*/ -          0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f, -          0xe0, 0xf1, 0xf4, 0xfc} }, -    {12, {0x00, 0x05, 0x08, 0x11, 0x18, 0x1f, 0x60, 0x60, 0x62, 0x66, /*7899*/ -          0x84, 0x8e, 0x90, 0x95, 0x97, 0x97, 0x9a, 0x9a, 0x9c, 0x9f, -          0xe0, 0xf1, 0xf4, 0xfc} }, -  }; -  chip = p->chip & AHC_CHIPID_MASK; -  printk("%s at ", -         board_names[p->board_name_index]); -  switch(p->chip & ~AHC_CHIPID_MASK) -  { -    case AHC_VL: -      printk("VLB Slot %d.\n", p->pci_device_fn); -      break; -    case AHC_EISA: -      printk("EISA Slot %d.\n", p->pci_device_fn); -      break; -    case AHC_PCI: -    default: -      printk("PCI %d/%d/%d.\n", p->pci_bus, PCI_SLOT(p->pci_device_fn), -             PCI_FUNC(p->pci_device_fn)); -      break; -  } - -  /* -   * the registers on the card.... -   */ -  printk("Card Dump:\n"); -  k = 0; -  for(i=0; i<cards_ds[chip].num_ranges; i++) -  { -    for(j  = cards_ds[chip].range_val[ i * 2 ]; -        j <= cards_ds[chip].range_val[ i * 2 + 1 ] ; -        j++) -    { -      printk("%02x:%02x ", j, aic_inb(p, j)); -      if(++k == 13) -      { -        printk("\n"); -        k=0; -      } -    } -  } -  if(k != 0) -    printk("\n"); - -  /* -   * If this was an Ultra2 controller, then we just hosed the card in terms -   * of the QUEUE REGS.  
This function is only called at init time or by -   * the panic_abort function, so it's safe to assume a generic init time -   * setting here -   */ - -  if(p->features & AHC_QUEUE_REGS) -  { -    aic_outb(p, 0, SDSCB_QOFF); -    aic_outb(p, 0, SNSCB_QOFF); -    aic_outb(p, 0, HNSCB_QOFF); -  } - -} - -/*+F************************************************************************* - * Function: - *   aic7xxx_print_scratch_ram - * - * Description: - *   Print out the scratch RAM values on the card. - *-F*************************************************************************/ -static void -aic7xxx_print_scratch_ram(struct aic7xxx_host *p) -{ -  int i, k; - -  k = 0; -  printk("Scratch RAM:\n"); -  for(i = SRAM_BASE; i < SEQCTL; i++) -  { -    printk("%02x:%02x ", i, aic_inb(p, i)); -    if(++k == 13) -    { -      printk("\n"); -      k=0; -    } -  } -  if (p->features & AHC_MORE_SRAM) -  { -    for(i = TARG_OFFSET; i < 0x80; i++) -    { -      printk("%02x:%02x ", i, aic_inb(p, i)); -      if(++k == 13) -      { -        printk("\n"); -        k=0; -      } -    } -  } -  printk("\n"); -} - - -#include "aic7xxx_old/aic7xxx_proc.c" - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_VERSION(AIC7XXX_H_VERSION); - - -static struct scsi_host_template driver_template = { -	.show_info		= aic7xxx_show_info, -	.detect			= aic7xxx_detect, -	.release		= aic7xxx_release, -	.info			= aic7xxx_info,	 -	.queuecommand		= aic7xxx_queue, -	.slave_alloc		= aic7xxx_slave_alloc, -	.slave_configure	= aic7xxx_slave_configure, -	.slave_destroy		= aic7xxx_slave_destroy, -	.bios_param		= aic7xxx_biosparam, -	.eh_abort_handler	= aic7xxx_abort, -	.eh_device_reset_handler	= aic7xxx_bus_device_reset, -	.eh_host_reset_handler	= aic7xxx_reset, -	.can_queue		= 255, -	.this_id		= -1, -	.max_sectors		= 2048, -	.cmd_per_lun		= 3, -	.use_clustering		= ENABLE_CLUSTERING, -}; - -#include "scsi_module.c" - -/* - * Overrides for Emacs so that we almost follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only.  This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 2 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -2 - * c-argdecl-indent: 2 - * c-label-offset: -2 - * c-continued-statement-offset: 2 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.h b/drivers/scsi/aic7xxx_old/aic7xxx.h deleted file mode 100644 index 0116c8128a6..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx.h +++ /dev/null @@ -1,28 +0,0 @@ -/*+M************************************************************************* - * Adaptec AIC7xxx device driver for Linux. - * - * Copyright (c) 1994 John Aycock - *   The University of Calgary Department of Computer Science. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING.  
If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. - *  - * $Id: aic7xxx.h,v 3.2 1996/07/23 03:37:26 deang Exp $ - *-M*************************************************************************/ -#ifndef _aic7xxx_h -#define _aic7xxx_h - -#define AIC7XXX_H_VERSION  "5.2.0" - -#endif /* _aic7xxx_h */ diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.reg b/drivers/scsi/aic7xxx_old/aic7xxx.reg deleted file mode 100644 index f67b4bced01..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx.reg +++ /dev/null @@ -1,1401 +0,0 @@ -/* - * Aic7xxx register and scratch ram definitions. - * - * Copyright (c) 1994-1998 Justin Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - *    notice, this list of conditions, and the following disclaimer, - *    without modification, immediately at the beginning of the file. - * 2. The name of the author may not be used to endorse or promote products - *    derived from this software without specific prior written permission. - * - * Where this Software is combined with software released under the terms of  - * the GNU General Public License ("GPL") and the terms of the GPL would require the  - * combined work to also be released under the terms of the GPL, the terms - * and conditions of this License will apply in addition to those of the - * GPL with the exception of any terms or conditions of this License that - * conflict with, or are expressly prohibited by, the GPL. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - *	$Id: aic7xxx.reg,v 1.4 1997/06/27 19:38:39 gibbs Exp $ - */ - -/* - * This file is processed by the aic7xxx_asm utility for use in assembling - * firmware for the aic7xxx family of SCSI host adapters as well as to generate - * a C header file for use in the kernel portion of the Aic7xxx driver. - * - * All page numbers refer to the Adaptec AIC-7770 Data Book available from - * Adaptec's Technical Documents Department 1-800-934-2766 - */ - -/* - * SCSI Sequence Control (p. 3-11). - * Each bit, when set starts a specific SCSI sequence on the bus - */ -register SCSISEQ { -	address			0x000 -	access_mode RW -	bit	TEMODE		0x80 -	bit	ENSELO		0x40 -	bit	ENSELI		0x20 -	bit	ENRSELI		0x10 -	bit	ENAUTOATNO	0x08 -	bit	ENAUTOATNI	0x04 -	bit	ENAUTOATNP	0x02 -	bit	SCSIRSTO	0x01 -} - -/* - * SCSI Transfer Control 0 Register (pp. 3-13). - * Controls the SCSI module data path. 
- */ -register SXFRCTL0 { -	address			0x001 -	access_mode RW -	bit	DFON		0x80 -	bit	DFPEXP		0x40 -	bit	FAST20		0x20 -	bit	CLRSTCNT	0x10 -	bit	SPIOEN		0x08 -	bit	SCAMEN		0x04 -	bit	CLRCHN		0x02 -} - -/* - * SCSI Transfer Control 1 Register (pp. 3-14,15). - * Controls the SCSI module data path. - */ -register SXFRCTL1 { -	address			0x002 -	access_mode RW -	bit	BITBUCKET	0x80 -	bit	SWRAPEN		0x40 -	bit	ENSPCHK		0x20 -	mask	STIMESEL	0x18 -	bit	ENSTIMER	0x04 -	bit	ACTNEGEN	0x02 -	bit	STPWEN		0x01	/* Powered Termination */ -} - -/* - * SCSI Control Signal Read Register (p. 3-15). - * Reads the actual state of the SCSI bus pins - */ -register SCSISIGI { -	address			0x003 -	access_mode RO -	bit	CDI		0x80 -	bit	IOI		0x40 -	bit	MSGI		0x20 -	bit	ATNI		0x10 -	bit	SELI		0x08 -	bit	BSYI		0x04 -	bit	REQI		0x02 -	bit	ACKI		0x01 -/* - * Possible phases in SCSISIGI - */ -	mask	PHASE_MASK	CDI|IOI|MSGI -	mask	P_DATAOUT	0x00 -	mask	P_DATAIN	IOI -	mask	P_COMMAND	CDI -	mask	P_MESGOUT	CDI|MSGI -	mask	P_STATUS	CDI|IOI -	mask	P_MESGIN	CDI|IOI|MSGI -} - -/* - * SCSI Control Signal Write Register (p. 3-16). - * Writing to this register modifies the control signals on the bus.  Only - * those signals that are allowed in the current mode (Initiator/Target) are - * asserted. - */ -register SCSISIGO { -	address			0x003 -	access_mode WO -	bit	CDO		0x80 -	bit	IOO		0x40 -	bit	MSGO		0x20 -	bit	ATNO		0x10 -	bit	SELO		0x08 -	bit	BSYO		0x04 -	bit	REQO		0x02 -	bit	ACKO		0x01 -/* - * Possible phases to write into SCSISIG0 - */ -	mask	PHASE_MASK	CDI|IOI|MSGI -	mask	P_DATAOUT	0x00 -	mask	P_DATAIN	IOI -	mask	P_COMMAND	CDI -	mask	P_MESGOUT	CDI|MSGI -	mask	P_STATUS	CDI|IOI -	mask	P_MESGIN	CDI|IOI|MSGI -} - -/*  - * SCSI Rate Control (p. 3-17). - * Contents of this register determine the Synchronous SCSI data transfer - * rate and the maximum synchronous Req/Ack offset.  An offset of 0 in the - * SOFS (3:0) bits disables synchronous data transfers.  Any offset value - * greater than 0 enables synchronous transfers. - */ -register SCSIRATE { -	address			0x004 -	access_mode RW -	bit	WIDEXFER	0x80		/* Wide transfer control */ -	mask	SXFR		0x70		/* Sync transfer rate */ -	mask	SXFR_ULTRA2	0x7f		/* Sync transfer rate */ -	mask	SOFS		0x0f		/* Sync offset */ -} - -/* - * SCSI ID (p. 3-18). - * Contains the ID of the board and the current target on the - * selected channel. - */ -register SCSIID	{ -	address			0x005 -	access_mode RW -	mask	TID		0xf0		/* Target ID mask */ -	mask	OID		0x0f		/* Our ID mask */ -	/* -	 * SCSI Maximum Offset (p. 4-61 aic7890/91 Data Book) -	 * The aic7890/91 allow an offset of up to 127 transfers in both wide -	 * and narrow mode. -	 */ -	alias	SCSIOFFSET -	mask	SOFS_ULTRA2	0x7f		/* Sync offset U2 chips */ -} - -/* - * SCSI Latched Data (p. 3-19). - * Read/Write latches used to transfer data on the SCSI bus during - * Automatic or Manual PIO mode.  SCSIDATH can be used for the - * upper byte of a 16bit wide asynchronouse data phase transfer. - */ -register SCSIDATL { -	address			0x006 -	access_mode RW -} - -register SCSIDATH { -	address			0x007 -	access_mode RW -} - -/* - * SCSI Transfer Count (pp. 3-19,20) - * These registers count down the number of bytes transferred - * across the SCSI bus.  The counter is decremented only once - * the data has been safely transferred.  SDONE in SSTAT0 is - * set when STCNT goes to 0 - */  -register STCNT { -	address			0x008 -	size	3 -	access_mode RW -} - -/* - * Option Mode Register (Alternate Mode) (p. 5-198) - * This register is used to set certain options on Ultra3 based chips. 
- * The chip must be in alternate mode (bit ALT_MODE in SFUNCT must be set) - */ -register OPTIONMODE { -	address			0x008 -	access_mode RW -	bit	AUTORATEEN	0x80 -	bit	AUTOACKEN	0x40 -	bit	ATNMGMNTEN	0x20 -	bit	BUSFREEREV	0x10 -	bit	EXPPHASEDIS	0x08 -	bit	SCSIDATL_IMGEN	0x04 -	bit	AUTO_MSGOUT_DE	0x02 -	bit	DIS_MSGIN_DUALEDGE	0x01 -} - - -/* - * Clear SCSI Interrupt 0 (p. 3-20) - * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT0. - */ -register CLRSINT0 { -	address			0x00b -	access_mode WO -	bit	CLRSELDO	0x40 -	bit	CLRSELDI	0x20 -	bit	CLRSELINGO	0x10 -	bit	CLRSWRAP	0x08 -	bit	CLRSPIORDY	0x02 -} - -/* - * SCSI Status 0 (p. 3-21) - * Contains one set of SCSI Interrupt codes - * These are most likely of interest to the sequencer - */ -register SSTAT0	{ -	address			0x00b -	access_mode RO -	bit	TARGET		0x80	/* Board acting as target */ -	bit	SELDO		0x40	/* Selection Done */ -	bit	SELDI		0x20	/* Board has been selected */ -	bit	SELINGO		0x10	/* Selection In Progress */ -	bit	SWRAP		0x08	/* 24bit counter wrap */ -	bit	IOERR		0x08	/* LVD Tranceiver mode changed */ -	bit	SDONE		0x04	/* STCNT = 0x000000 */ -	bit	SPIORDY		0x02	/* SCSI PIO Ready */ -	bit	DMADONE		0x01	/* DMA transfer completed */ -} - -/* - * Clear SCSI Interrupt 1 (p. 3-23) - * Writing a 1 to a bit clears the associated SCSI Interrupt in SSTAT1. - */ -register CLRSINT1 { -	address			0x00c -	access_mode WO -	bit	CLRSELTIMEO	0x80 -	bit	CLRATNO		0x40 -	bit	CLRSCSIRSTI	0x20 -	bit	CLRBUSFREE	0x08 -	bit	CLRSCSIPERR	0x04 -	bit	CLRPHASECHG	0x02 -	bit	CLRREQINIT	0x01 -} - -/* - * SCSI Status 1 (p. 3-24) - */ -register SSTAT1	{ -	address			0x00c -	access_mode RO -	bit	SELTO		0x80 -	bit	ATNTARG 	0x40 -	bit	SCSIRSTI	0x20 -	bit	PHASEMIS	0x10 -	bit	BUSFREE		0x08 -	bit	SCSIPERR	0x04 -	bit	PHASECHG	0x02 -	bit	REQINIT		0x01 -} - -/* - * SCSI Status 2 (pp. 3-25,26) - */ -register SSTAT2 { -	address			0x00d -	access_mode RO -	bit	OVERRUN		0x80 -	bit	SHVALID		0x40 -	bit	WIDE_RES	0x20 -	bit	EXP_ACTIVE	0x10	/* SCSI Expander Active */ -	bit	CRCVALERR	0x08	/* CRC Value Error */ -	bit	CRCENDERR	0x04	/* CRC End Error */ -	bit	CRCREQERR	0x02	/* CRC REQ Error */ -	bit	DUAL_EDGE_ERROR	0x01	/* Invalid pins for Dual Edge phase */ -	mask	SFCNT		0x1f -} - -/* - * SCSI Status 3 (p. 3-26) - */ -register SSTAT3 { -	address			0x00e -	access_mode RO -	mask	SCSICNT		0xf0 -	mask	OFFCNT		0x0f -} - -/* - * SCSI ID for the aic7890/91 chips - */ -register SCSIID_ULTRA2 { -	address			0x00f -	access_mode RW -	mask	TID		0xf0		/* Target ID mask */ -	mask	OID		0x0f		/* Our ID mask */ -} - -/* - * SCSI Interrupt Mode 1 (p. 3-28) - * Setting any bit will enable the corresponding function - * in SIMODE0 to interrupt via the IRQ pin. - */ -register SIMODE0 { -	address			0x010 -	access_mode RW -	bit	ENSELDO		0x40 -	bit	ENSELDI		0x20 -	bit	ENSELINGO	0x10 -	bit	ENSWRAP		0x08 -	bit	ENIOERR		0x08	/* LVD Tranceiver mode changes */ -	bit	ENSDONE		0x04 -	bit	ENSPIORDY	0x02 -	bit	ENDMADONE	0x01 -} - -/* - * SCSI Interrupt Mode 1 (pp. 3-28,29) - * Setting any bit will enable the corresponding function - * in SIMODE1 to interrupt via the IRQ pin. - */ -register SIMODE1 { -	address			0x011 -	access_mode RW -	bit	ENSELTIMO	0x80 -	bit	ENATNTARG	0x40 -	bit	ENSCSIRST	0x20 -	bit	ENPHASEMIS	0x10 -	bit	ENBUSFREE	0x08 -	bit	ENSCSIPERR	0x04 -	bit	ENPHASECHG	0x02 -	bit	ENREQINIT	0x01 -} - -/* - * SCSI Data Bus (High) (p. 3-29) - * This register reads data on the SCSI Data bus directly. 
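As the file header above notes, this .reg source is machine-processed by aic7xxx_asm both into sequencer firmware symbols and into a C header for the kernel driver. The exact generated output is not part of this patch; a plausible sketch of what the SCSISEQ block would turn into on the C side:

    /* Plausible shape of the generated header for the SCSISEQ block
     * (assumption: the real aic7xxx_asm output format is not shown here). */
    #define SCSISEQ                 0x000
    #define         TEMODE          0x80
    #define         ENSELO          0x40
    #define         ENSELI          0x20
    #define         ENRSELI         0x10
    #define         ENAUTOATNO      0x08
    #define         ENAUTOATNI      0x04
    #define         ENAUTOATNP      0x02
    #define         SCSIRSTO        0x01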
- */ -register SCSIBUSL { -	address			0x012 -	access_mode RO -} - -register SCSIBUSH { -	address			0x013 -	access_mode RO -} - -/* - * SCSI/Host Address (p. 3-30) - * These registers hold the host address for the byte about to be - * transferred on the SCSI bus.  They are counted up in the same - * manner as STCNT is counted down.  SHADDR should always be used - * to determine the address of the last byte transferred since HADDR - * can be skewed by write ahead. - */ -register SHADDR { -	address			0x014 -	size	4 -	access_mode RO -} - -/* - * Selection Timeout Timer (p. 3-30) - */ -register SELTIMER { -	address			0x018 -	access_mode RW -	bit	STAGE6		0x20 -	bit	STAGE5		0x10 -	bit	STAGE4		0x08 -	bit	STAGE3		0x04 -	bit	STAGE2		0x02 -	bit	STAGE1		0x01 -} - -/* - * Selection/Reselection ID (p. 3-31) - * Upper four bits are the device id.  The ONEBIT is set when the re/selecting - * device did not set its own ID. - */ -register SELID { -	address			0x019 -	access_mode RW -	mask	SELID_MASK	0xf0 -	bit	ONEBIT		0x08 -} - -/* - * Serial Port I/O Cabability register (p. 4-95 aic7860 Data Book) - * Indicates if external logic has been attached to the chip to - * perform the tasks of accessing a serial eeprom, testing termination - * strength, and performing cable detection.  On the aic7860, most of - * these features are handled on chip, but on the aic7855 an attached - * aic3800 does the grunt work. - */ -register SPIOCAP { -	address			0x01b -	access_mode RW -	bit	SOFT1		0x80 -	bit	SOFT0		0x40 -	bit	SOFTCMDEN	0x20	 -	bit	HAS_BRDCTL	0x10	/* External Board control */ -	bit	SEEPROM		0x08	/* External serial eeprom logic */ -	bit	EEPROM		0x04	/* Writable external BIOS ROM */ -	bit	ROM		0x02	/* Logic for accessing external ROM */ -	bit	SSPIOCPS	0x01	/* Termination and cable detection */ -} - -/* - * SCSI Block Control (p. 3-32) - * Controls Bus type and channel selection.  In a twin channel configuration - * addresses 0x00-0x1e are gated to the appropriate channel based on this - * register.  SELWIDE allows for the coexistence of 8bit and 16bit devices - * on a wide bus. - */ -register SBLKCTL { -	address			0x01f -	access_mode RW -	bit	DIAGLEDEN	0x80	/* Aic78X0 only */ -	bit	DIAGLEDON	0x40	/* Aic78X0 only */ -	bit	AUTOFLUSHDIS	0x20 -	bit	SELBUSB		0x08 -	bit	ENAB40		0x08	/* LVD transceiver active */ -	bit	ENAB20		0x04	/* SE/HVD transceiver active */ -	bit	SELWIDE		0x02 -	bit	XCVR		0x01	/* External transceiver active */ -} - -/* - * Sequencer Control (p. 3-33) - * Error detection mode and speed configuration - */ -register SEQCTL { -	address			0x060 -	access_mode RW -	bit	PERRORDIS	0x80 -	bit	PAUSEDIS	0x40 -	bit	FAILDIS		0x20 -	bit	FASTMODE	0x10 -	bit	BRKADRINTEN	0x08 -	bit	STEP		0x04 -	bit	SEQRESET	0x02 -	bit	LOADRAM		0x01 -} - -/* - * Sequencer RAM Data (p. 3-34) - * Single byte window into the Scratch Ram area starting at the address - * specified by SEQADDR0 and SEQADDR1.  To write a full word, simply write - * four bytes in succession.  The SEQADDRs will increment after the most - * significant byte is written - */ -register SEQRAM { -	address			0x061 -	access_mode RW -} - -/* - * Sequencer Address Registers (p. 
3-35) - * Only the first bit of SEQADDR1 holds addressing information - */ -register SEQADDR0 { -	address			0x062 -	access_mode RW -} - -register SEQADDR1 { -	address			0x063 -	access_mode RW -	mask	SEQADDR1_MASK	0x01 -} - -/* - * Accumulator - * We cheat by passing arguments in the Accumulator up to the kernel driver - */ -register ACCUM { -	address			0x064 -	access_mode RW -	accumulator -} - -register SINDEX	{ -	address			0x065 -	access_mode RW -	sindex -} - -register DINDEX { -	address			0x066 -	access_mode RW -} - -register ALLONES { -	address			0x069 -	access_mode RO -	allones -} - -register ALLZEROS { -	address			0x06a -	access_mode RO -	allzeros -} - -register NONE { -	address			0x06a -	access_mode WO -	none -} - -register FLAGS { -	address			0x06b -	access_mode RO -	bit	ZERO		0x02 -	bit	CARRY		0x01 -} - -register SINDIR	{ -	address			0x06c -	access_mode RO -} - -register DINDIR	 { -	address			0x06d -	access_mode WO -} - -register FUNCTION1 { -	address			0x06e -	access_mode RW -} - -register STACK { -	address			0x06f -	access_mode RO -} - -/* - * Board Control (p. 3-43) - */ -register BCTL { -	address			0x084 -	access_mode RW -	bit	ACE		0x08 -	bit	ENABLE		0x01 -} - -register DSCOMMAND0 { -	address			0x084 -	access_mode RW -	bit	CACHETHEN	0x80 -	bit	DPARCKEN	0x40 -	bit	MPARCKEN	0x20 -	bit	EXTREQLCK	0x10 -	bit	INTSCBRAMSEL	0x08 -	bit	RAMPS		0x04 -	bit	USCBSIZE32	0x02 -	bit	CIOPARCKEN	0x01 -} - -/* - * On the aic78X0 chips, Board Control is replaced by the DSCommand - * register (p. 4-64) - */ -register DSCOMMAND { -	address			0x084 -	access_mode RW -	bit	CACHETHEN	0x80	/* Cache Threshold enable */ -	bit	DPARCKEN	0x40	/* Data Parity Check Enable */ -	bit	MPARCKEN	0x20	/* Memory Parity Check Enable */ -	bit	EXTREQLCK	0x10	/* External Request Lock */ -} - -/* - * Bus On/Off Time (p. 3-44) - */ -register BUSTIME { -	address			0x085 -	access_mode RW -	mask	BOFF		0xf0 -	mask	BON		0x0f -} - -/* - * Bus Speed (p. 3-45) - */ -register BUSSPD { -	address			0x086 -	access_mode RW -	mask	DFTHRSH		0xc0 -	mask	STBOFF		0x38 -	mask	STBON		0x07 -	mask	DFTHRSH_100	0xc0 -} - -/* - * Host Control (p. 3-47) R/W - * Overall host control of the device. - */ -register HCNTRL { -	address			0x087 -	access_mode RW -	bit	POWRDN		0x40 -	bit	SWINT		0x10 -	bit	IRQMS		0x08 -	bit	PAUSE		0x04 -	bit	INTEN		0x02 -	bit	CHIPRST		0x01 -	bit	CHIPRSTACK	0x01 -} - -/* - * Host Address (p. 3-48) - * This register contains the address of the byte about - * to be transferred across the host bus. - */ -register HADDR { -	address			0x088 -	size	4 -	access_mode RW -} - -register HCNT { -	address			0x08c -	size	3 -	access_mode RW -} - -/* - * SCB Pointer (p. 3-49) - * Gate one of the four SCBs into the SCBARRAY window. - */ -register SCBPTR { -	address			0x090 -	access_mode RW -} - -/* - * Interrupt Status (p. 
3-50) - * Status for system interrupts - */ -register INTSTAT { -	address			0x091 -	access_mode RW -	bit	BRKADRINT 0x08 -	bit	SCSIINT	  0x04 -	bit	CMDCMPLT  0x02 -	bit	SEQINT    0x01 -	mask	BAD_PHASE	SEQINT		/* unknown scsi bus phase */ -	mask	SEND_REJECT	0x10|SEQINT	/* sending a message reject */ -	mask	NO_IDENT	0x20|SEQINT	/* no IDENTIFY after reconnect*/ -	mask	NO_MATCH	0x30|SEQINT	/* no cmd match for reconnect */ -	mask	EXTENDED_MSG	0x40|SEQINT	/* Extended message received */ -	mask	WIDE_RESIDUE	0x50|SEQINT	/* need kernel to back up */ -						/* the SG array for us */ -	mask	REJECT_MSG	0x60|SEQINT	/* Reject message received */ -	mask	BAD_STATUS	0x70|SEQINT	/* Bad status from target */ -	mask	RESIDUAL	0x80|SEQINT	/* Residual byte count != 0 */ -	mask	AWAITING_MSG	0xa0|SEQINT	/* -						 * Kernel requested to specify -						 * a message to this target -						 * (command was null), so tell -						 * it that it can fill the -						 * message buffer. -						 */ -	mask	SEQ_SG_FIXUP	0xb0|SEQINT	/* need help with fixing up -						 * the sg array pointer after -						 * a phasemis with no valid -						 * sg elements in the shadow -						 * pipeline. -						 */ -	mask	TRACEPOINT2	0xc0|SEQINT -	mask	MSGIN_PHASEMIS	0xd0|SEQINT	/* -						 * Target changed phase on us -						 * when we were expecting -						 * another msgin byte. -						 */ -	mask	DATA_OVERRUN	0xe0|SEQINT	/* -						 * Target attempted to write -						 * beyond the bounds of its -						 * command. -						 */ - -	mask	SEQINT_MASK	0xf0|SEQINT	/* SEQINT Status Codes */ -	mask	INT_PEND  (BRKADRINT|SEQINT|SCSIINT|CMDCMPLT) -} - -/* - * Hard Error (p. 3-53) - * Reporting of catastrophic errors.  You usually cannot recover from - * these without a full board reset. - */ -register ERROR { -	address			0x092 -	access_mode RO -	bit	CIOPARERR	0x80	/* Ultra2 only */ -	bit	PCIERRSTAT	0x40	/* PCI only */ -	bit	MPARERR		0x20	/* PCI only */ -	bit	DPARERR		0x10	/* PCI only */ -	bit	SQPARERR	0x08 -	bit	ILLOPCODE	0x04 -	bit	ILLSADDR	0x02 -	bit	DSCTMOUT	0x02	/* Ultra3 only */ -	bit	ILLHADDR	0x01 -} - -/* - * Clear Interrupt Status (p. 3-52) - */ -register CLRINT { -	address			0x092 -	access_mode WO -	bit	CLRPARERR	0x10	/* PCI only */ -	bit	CLRBRKADRINT	0x08 -	bit	CLRSCSIINT      0x04 -	bit	CLRCMDINT 	0x02 -	bit	CLRSEQINT 	0x01 -} - -register DFCNTRL { -	address			0x093 -	access_mode RW -	bit	PRELOADEN	0x80	/* aic7890 only */ -	bit	WIDEODD		0x40 -	bit	SCSIEN		0x20 -	bit	SDMAEN		0x10 -	bit	SDMAENACK	0x10 -	bit	HDMAEN		0x08 -	bit	HDMAENACK	0x08 -	bit	DIRECTION	0x04 -	bit	FIFOFLUSH	0x02 -	bit	FIFORESET	0x01 -} - -register DFSTATUS { -	address			0x094 -	access_mode RO -	bit	PRELOAD_AVAIL	0x80 -	bit	DWORDEMP	0x20 -	bit	MREQPEND	0x10 -	bit	HDONE		0x08 -	bit	DFTHRESH	0x04 -	bit	FIFOFULL	0x02 -	bit	FIFOEMP		0x01 -} - -register DFDAT { -	address			0x099 -	access_mode RW -} - -/* - * SCB Auto Increment (p. 3-59) - * Byte offset into the SCB Array and an optional bit to allow auto - * incrementing of the address during download and upload operations - */ -register SCBCNT { -	address			0x09a -	access_mode RW -	bit	SCBAUTO		0x80 -	mask	SCBCNT_MASK	0x1f -} - -/* - * Queue In FIFO (p. 3-60) - * Input queue for queued SCBs (commands that the seqencer has yet to start) - */ -register QINFIFO { -	address			0x09b -	access_mode RW -} - -/* - * Queue In Count (p. 3-60) - * Number of queued SCBs - */ -register QINCNT	{ -	address			0x09c -	access_mode RO -} - -/* - * SCSIDATL IMAGE Register (p. 
5-104) - * Write to this register also go to SCSIDATL but this register will preserve - * the data for later reading as long as the SCSIDATL_IMGEN bit in the - * OPTIONMODE register is set. - */ -register SCSIDATL_IMG { -	address			0x09c -	access_mode RW -} - -/* - * Queue Out FIFO (p. 3-61) - * Queue of SCBs that have completed and await the host - */ -register QOUTFIFO { -	address			0x09d -	access_mode WO -} - -/* - * CRC Control 1 Register (p. 5-105) - * Control bits for the Ultra 160/m CRC facilities - */ -register CRCCONTROL1 { -	address			0x09d -	access_mode RW -	bit	CRCONSEEN	0x80 /* CRC ON Single Edge ENable */ -	bit	CRCVALCHKEN	0x40 /* CRC Value Check Enable */ -	bit	CRCENDCHKEN	0x20 /* CRC End Check Enable */ -	bit	CRCREQCHKEN	0x10 -	bit	TARGCRCENDEN	0x08 /* Enable End CRC transfer when target */ -	bit	TARGCRCCNTEN	0x04 /* Enable CRC transfer when target */ -} - -/* - * Queue Out Count (p. 3-61) - * Number of queued SCBs in the Out FIFO - */ -register QOUTCNT { -	address			0x09e -	access_mode RO -} - -/* - * SCSI Phase Register (p. 5-106) - * Current bus phase - */ -register SCSIPHASE { -	address			0x09e -	access_mode RO -	bit	SP_STATUS		0x20 -	bit	SP_COMMAND		0x10 -	bit	SP_MSG_IN		0x08 -	bit	SP_MSG_OUT		0x04 -	bit	SP_DATA_IN		0x02 -	bit	SP_DATA_OUT	0x01 -} - -/* - * Special Function - */ -register SFUNCT { -	address			0x09f -	access_mode RW -	bit	ALT_MODE	0x80 -} - -/* - * SCB Definition (p. 5-4) - */ -scb { -	address			0x0a0 -	SCB_CONTROL { -		size	1 -		bit	MK_MESSAGE      0x80 -		bit	DISCENB         0x40 -		bit	TAG_ENB		0x20 -		bit	DISCONNECTED	0x04 -		mask	SCB_TAG_TYPE	0x03 -	} -	SCB_TCL { -		size	1 -		bit	SELBUSB		0x08 -		mask	TID		0xf0 -		mask	LID		0x07 -	} -	SCB_TARGET_STATUS { -		size	1 -	} -	SCB_SGCOUNT { -		size	1 -	} -	SCB_SGPTR { -		size	4 -	} -	SCB_RESID_SGCNT { -		size	1 -	} -	SCB_RESID_DCNT	{ -		size	3 -	} -	SCB_DATAPTR { -		size	4 -	} -	SCB_DATACNT { -		/* -		 * Really only 3 bytes, but padded to make -		 * the kernel's job easier. 
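The INTSTAT encoding defined above packs the interrupt type into the low bits and, for sequencer interrupts, a reason code into the high nibble. A minimal sketch of how a kernel-side handler can decode it (the function is illustrative; the constants are the ones from the register definition):

    /* Sketch: classify a sequencer interrupt by its SEQINT reason code. */
    static void handle_seqint(struct aic7xxx_host *p)
    {
            unsigned char intstat = aic_inb(p, INTSTAT);

            if (!(intstat & SEQINT))
                    return;
            switch (intstat & SEQINT_MASK) {
            case SEND_REJECT:       /* 0x10|SEQINT: rejecting a message */
                    break;
            case BAD_STATUS:        /* 0x70|SEQINT: bad status from target */
                    break;
            default:                /* other reason codes per the mask table */
                    break;
            }
    }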
-		 */ -		size	4 -	} -	SCB_CMDPTR { -		size	4 -	} -	SCB_CMDLEN { -		size	1 -	} -	SCB_TAG { -		size	1 -	} -	SCB_NEXT { -		size	1 -	} -	SCB_PREV { -		size	1 -	} -	SCB_BUSYTARGETS { -		size	4 -	} -} - -const	SG_SIZEOF	0x08		/* sizeof(struct ahc_dma) */ - -/* --------------------- AHA-2840-only definitions -------------------- */ - -register SEECTL_2840 { -	address			0x0c0 -	access_mode RW -	bit	CS_2840		0x04 -	bit	CK_2840		0x02 -	bit	DO_2840		0x01 -} - -register STATUS_2840 { -	address			0x0c1 -	access_mode RW -	bit	EEPROM_TF	0x80 -	mask	BIOS_SEL	0x60 -	mask	ADSEL		0x1e -	bit	DI_2840		0x01 -} - -/* --------------------- AIC-7870-only definitions -------------------- */ - -register DSPCISTATUS { -	address			0x086 -	mask	DFTHRSH_100	0xc0 -} - -register CCHADDR { -	address			0x0E0 -	size 8 -} - -register CCHCNT { -	address			0x0E8 -} - -register CCSGRAM { -	address			0x0E9 -} - -register CCSGADDR { -	address			0x0EA -} - -register CCSGCTL { -	address			0x0EB -	bit	CCSGDONE	0x80 -	bit	CCSGEN		0x08 -	bit	FLAG		0x02 -	bit	CCSGRESET	0x01 -} - -register CCSCBCNT { -	address			0xEF -} - -register CCSCBCTL { -	address			0x0EE -	bit	CCSCBDONE	0x80 -	bit	ARRDONE		0x40	/* SCB Array prefetch done */ -	bit	CCARREN		0x10 -	bit	CCSCBEN		0x08 -	bit	CCSCBDIR	0x04 -	bit	CCSCBRESET	0x01 -} - -register CCSCBADDR { -	address			0x0ED -} - -register CCSCBRAM { -	address			0xEC -} - -register CCSCBPTR { -	address			0x0F1 -} - -register HNSCB_QOFF { -	address			0x0F4 -} - -register HESCB_QOFF { -	address			0x0F5 -} - -register SNSCB_QOFF { -	address			0x0F6 -} - -register SESCB_QOFF { -	address			0x0F7 -} - -register SDSCB_QOFF { -	address			0x0F8 -} - -register QOFF_CTLSTA { -	address			0x0FA -	bit	ESTABLISH_SCB_AVAIL	0x80 -	bit	SCB_AVAIL	0x40 -	bit	SNSCB_ROLLOVER	0x20 -	bit	SDSCB_ROLLOVER	0x10 -	bit	SESCB_ROLLOVER	0x08 -	mask	SCB_QSIZE	0x07 -	mask	SCB_QSIZE_256	0x06 -} - -register DFF_THRSH { -	address			0x0FB -	mask	WR_DFTHRSH	0x70 -	mask	RD_DFTHRSH	0x07 -	mask	RD_DFTHRSH_MIN	0x00 -	mask	RD_DFTHRSH_25	0x01 -	mask	RD_DFTHRSH_50	0x02 -	mask	RD_DFTHRSH_63	0x03 -	mask	RD_DFTHRSH_75	0x04 -	mask	RD_DFTHRSH_85	0x05 -	mask	RD_DFTHRSH_90	0x06 -	mask	RD_DFTHRSH_MAX	0x07 -	mask	WR_DFTHRSH_MIN	0x00 -	mask	WR_DFTHRSH_25	0x10 -	mask	WR_DFTHRSH_50	0x20 -	mask	WR_DFTHRSH_63	0x30 -	mask	WR_DFTHRSH_75	0x40 -	mask	WR_DFTHRSH_85	0x50 -	mask	WR_DFTHRSH_90	0x60 -	mask	WR_DFTHRSH_MAX	0x70 -} - -register SG_CACHEPTR { -	access_mode RW -	address			0x0fc -	mask	SG_USER_DATA	0xfc -	bit	LAST_SEG	0x02 -	bit	LAST_SEG_DONE	0x01 -} - -register BRDCTL	{ -	address			0x01d -	bit	BRDDAT7		0x80 -	bit	BRDDAT6		0x40 -	bit	BRDDAT5		0x20 -	bit	BRDSTB		0x10 -	bit	BRDCS		0x08 -	bit	BRDRW		0x04 -	bit	BRDCTL1		0x02 -	bit	BRDCTL0		0x01 -	/* 7890 Definitions */ -	bit	BRDDAT4		0x10 -	bit	BRDDAT3		0x08 -	bit	BRDDAT2		0x04 -	bit	BRDRW_ULTRA2	0x02 -	bit	BRDSTB_ULTRA2	0x01 -} - -/* - * Serial EEPROM Control (p. 4-92 in 7870 Databook) - * Controls the reading and writing of an external serial 1-bit - * EEPROM Device.  In order to access the serial EEPROM, you must - * first set the SEEMS bit that generates a request to the memory - * port for access to the serial EEPROM device.  When the memory - * port is not busy servicing another request, it reconfigures - * to allow access to the serial EEPROM.  When this happens, SEERDY - * gets set high to verify that the memory port access has been - * granted.   - * - * After successful arbitration for the memory port, the SEECS bit of  - * the SEECTL register is connected to the chip select.  
The SEECK,  - * SEEDO, and SEEDI are connected to the clock, data out, and data in  - * lines respectively.  The SEERDY bit of SEECTL is useful in that it  - * gives us an 800 nsec timer.  After a write to the SEECTL register,  - * the SEERDY goes high 800 nsec later.  The one exception to this is  - * when we first request access to the memory port.  The SEERDY goes  - * high to signify that access has been granted and, for this case, has  - * no implied timing. - * - * See 93cx6.c for detailed information on the protocol necessary to  - * read the serial EEPROM. - */ -register SEECTL { -	address			0x01e -	bit	EXTARBACK	0x80 -	bit	EXTARBREQ	0x40 -	bit	SEEMS		0x20 -	bit	SEERDY		0x10 -	bit	SEECS		0x08 -	bit	SEECK		0x04 -	bit	SEEDO		0x02 -	bit	SEEDI		0x01 -} -/* ---------------------- Scratch RAM Offsets ------------------------- */ -/* These offsets are either to values that are initialized by the board's - * BIOS or are specified by the sequencer code. - * - * The host adapter card (at least the BIOS) uses 20-2f for SCSI - * device information, 32-33 and 5a-5f as well. As it turns out, the - * BIOS trashes 20-2f, writing the synchronous negotiation results - * on top of the BIOS values, so we re-use those for our per-target - * scratchspace (actually a value that can be copied directly into - * SCSIRATE).  The kernel driver will enable synchronous negotiation - * for all targets that have a value other than 0 in the lower four - * bits of the target scratch space.  This should work regardless of - * whether the bios has been installed. - */ - -scratch_ram { -	address			0x020 - -	/* -	 * 1 byte per target starting at this address for configuration values -	 */ -	TARG_SCSIRATE { -		size		16 -	} -	/* -	 * Bit vector of targets that have ULTRA enabled. -	 */ -	ULTRA_ENB { -		size		2 -	} -	/* -	 * Bit vector of targets that have disconnection disabled. -	 */ -	DISC_DSB { -		size		2 -	} -	/* -	 * Single byte buffer used to designate the type or message -	 * to send to a target. -	 */ -	MSG_OUT { -		size		1 -	} -	/* Parameters for DMA Logic */ -	DMAPARAMS { -		size		1 -		bit	PRELOADEN	0x80 -		bit	WIDEODD		0x40 -		bit	SCSIEN		0x20 -		bit	SDMAEN		0x10 -		bit	SDMAENACK	0x10 -		bit	HDMAEN		0x08 -		bit	HDMAENACK	0x08 -		bit	DIRECTION	0x04 -		bit	FIFOFLUSH	0x02 -		bit	FIFORESET	0x01 -	} -	SEQ_FLAGS { -		size		1 -		bit	IDENTIFY_SEEN	0x80 -		bit	SCBPTR_VALID	0x20 -		bit	DPHASE		0x10 -		bit	AMTARGET	0x08 -		bit	WIDE_BUS	0x02 -		bit	TWIN_BUS	0x01 -	} -	/* -	 * Temporary storage for the -	 * target/channel/lun of a -	 * reconnecting target -	 */ -	SAVED_TCL { -		size		1 -	} -	/* Working value of the number of SG segments left */ -	SG_COUNT { -		size		1 -	} -	/* Working value of SG pointer */ -	SG_NEXT	{ -		size		4 -	} -	/* -	 * The last bus phase as seen by the sequencer.  -	 */ -	LASTPHASE { -		size		1 -		bit	CDI		0x80 -		bit	IOI		0x40 -		bit	MSGI		0x20 -		mask	PHASE_MASK	CDI|IOI|MSGI -		mask	P_DATAOUT	0x00 -		mask	P_DATAIN	IOI -		mask	P_COMMAND	CDI -		mask	P_MESGOUT	CDI|MSGI -		mask	P_STATUS	CDI|IOI -		mask	P_MESGIN	CDI|IOI|MSGI -		mask	P_BUSFREE	0x01 -	} -	/* -	 * head of list of SCBs awaiting -	 * selection -	 */ -	WAITING_SCBH { -		size		1 -	} -	/* -	 * head of list of SCBs that are -	 * disconnected.  Used for SCB -	 * paging. -	 */ -	DISCONNECTED_SCBH { -		size		1 -	} -	/* -	 * head of list of SCBs that are -	 * not in use.  Used for SCB paging. -	 */ -	FREE_SCBH { -		size		1 -	} -	/* -	 * Address of the hardware scb array in the host. 
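A minimal sketch of the memory-port handshake the SEECTL comment above describes, assuming only the SEEMS and SEERDY bits defined in that register block; the full bit-bang read protocol lives in 93cx6.c as the comment says:

    /* Sketch: request the memory port for serial EEPROM access and
     * spin until SEERDY signals that access has been granted. */
    static void seeprom_acquire(struct aic7xxx_host *p)
    {
            aic_outb(p, SEEMS, SEECTL);         /* request the memory port */
            while (!(aic_inb(p, SEECTL) & SEERDY))
                    ;                           /* wait for the grant */
    }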
-	 */ -	HSCB_ADDR { -		size		4 -	} -	/* -	 * Address of the 256 byte array storing the SCBID of outstanding -	 * untagged SCBs indexed by TCL. -	 */ -	SCBID_ADDR { -		size		4 -	} -	/* -	 * Address of the array of command descriptors used to store -	 * information about incoming selections. -	 */ -	TMODE_CMDADDR { -		size		4 -	} -	KERNEL_QINPOS { -		size		1 -	} -	QINPOS { -		size		1 -	} -	QOUTPOS { -		size		1 -	} -	/* -	 * Offset into the command descriptor array for the next -	 * available desciptor to use. -	 */ -	TMODE_CMDADDR_NEXT { -		size		1 -	} -	ARG_1 { -		size		1 -		mask	SEND_MSG	0x80 -		mask	SEND_SENSE	0x40 -		mask	SEND_REJ	0x20 -		mask	MSGOUT_PHASEMIS	0x10 -		alias	RETURN_1 -	} -	ARG_2 { -		size		1 -		alias	RETURN_2 -	} - -	/* -	 * Snapshot of MSG_OUT taken after each message is sent. -	 */ -	LAST_MSG { -		size		1 -	} - -	/* -	 * Number of times we have filled the CCSGRAM with prefetched -	 * SG elements. -	 */ -	PREFETCH_CNT { -		size		1 -	} - - -	/* -	 * These are reserved registers in the card's scratch ram.  Some of -	 * the values are specified in the AHA2742 technical reference manual -	 * and are initialized by the BIOS at boot time. -	 */ -	SCSICONF { -		address		0x05a -		size		1 -		bit	TERM_ENB	0x80 -		bit	RESET_SCSI	0x40 -		mask	HSCSIID		0x07	/* our SCSI ID */ -		mask	HWSCSIID	0x0f	/* our SCSI ID if Wide Bus */ -	} -	HOSTCONF { -		address		0x05d -		size		1 -	} -	HA_274_BIOSCTRL	{ -		address		0x05f -		size		1 -		mask	BIOSMODE		0x30 -		mask	BIOSDISABLED		0x30	 -		bit	CHANNEL_B_PRIMARY	0x08 -	} -	/* -	 * Per target SCSI offset values for Ultra2 controllers. -	 */ -	TARG_OFFSET { -		address		0x070 -		size		16 -	} -} - -const SCB_LIST_NULL	0xff - -const CCSGADDR_MAX	0x80 -const CCSGRAM_MAXSEGS	16 - -/* Offsets into the SCBID array where different data is stored */ -const UNTAGGEDSCB_OFFSET	0 -const QOUTFIFO_OFFSET		1 -const QINFIFO_OFFSET		2 - -/* WDTR Message values */ -const BUS_8_BIT			0x00 -const BUS_16_BIT		0x01 -const BUS_32_BIT		0x02 - -/* Offset maximums */ -const MAX_OFFSET_8BIT		0x0f -const MAX_OFFSET_16BIT		0x08 -const MAX_OFFSET_ULTRA2		0x7f -const HOST_MSG			0xff - -/* Target mode command processing constants */ -const CMD_GROUP_CODE_SHIFT	0x05 -const CMD_GROUP0_BYTE_DELTA	-4 -const CMD_GROUP2_BYTE_DELTA	-6 -const CMD_GROUP4_BYTE_DELTA	4 -const CMD_GROUP5_BYTE_DELTA	11 - -/* - * Downloaded (kernel inserted) constants - */ - -/* - * Number of command descriptors in the command descriptor array. - */ -const TMODE_NUMCMDS	download diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq deleted file mode 100644 index dc3bb81cff0..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx.seq +++ /dev/null @@ -1,1539 +0,0 @@ -/* - * Adaptec 274x/284x/294x device driver firmware for Linux and FreeBSD. - * - * Copyright (c) 1994-1999 Justin Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - *    notice, this list of conditions, and the following disclaimer, - *    without modification, immediately at the beginning of the file. - * 2. The name of the author may not be used to endorse or promote products - *    derived from this software without specific prior written permission. 
- * - * Where this Software is combined with software released under the terms of  - * the GNU General Public License (GPL) and the terms of the GPL would require the  - * combined work to also be released under the terms of the GPL, the terms - * and conditions of this License will apply in addition to those of the - * GPL with the exception of any terms or conditions of this License that - * conflict with, or are expressly prohibited by, the GPL. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - *	$Id: aic7xxx.seq,v 1.77 1998/06/28 02:58:57 gibbs Exp $ - */ - -#include "aic7xxx.reg" -#include "scsi_message.h" - -/* - * A few words on the waiting SCB list: - * After starting the selection hardware, we check for reconnecting targets - * as well as for our selection to complete just in case the reselection wins - * bus arbitration.  The problem with this is that we must keep track of the - * SCB that we've already pulled from the QINFIFO and started the selection - * on just in case the reselection wins so that we can retry the selection at - * a later time.  This problem cannot be resolved by holding a single entry - * in scratch ram since a reconnecting target can request sense and this will - * create yet another SCB waiting for selection.  The solution used here is to  - * use byte 27 of the SCB as a pseudo-next pointer and to thread a list - * of SCBs that are awaiting selection.  Since 0-0xfe are valid SCB indexes,  - * SCB_LIST_NULL is 0xff which is out of range.  An entry is also added to - * this list every time a request sense occurs or after completing a non-tagged - * command for which a second SCB has been queued.  The sequencer will - * automatically consume the entries. - */ - -reset: -	clr	SCSISIGO;		/* De-assert BSY */ -	and	SXFRCTL1, ~BITBUCKET; -	/* Always allow reselection */ -	mvi	SCSISEQ, ENRSELI|ENAUTOATNP; - -	if ((p->features & AHC_CMD_CHAN) != 0) { -		/* Ensure that no DMA operations are in progress */ -		clr	CCSGCTL; -		clr	CCSCBCTL; -	} - -	call	clear_target_state; -poll_for_work: -	and	SXFRCTL0, ~SPIOEN; -	if ((p->features & AHC_QUEUE_REGS) == 0) { -		mov	A, QINPOS; -	} -poll_for_work_loop: -	if ((p->features & AHC_QUEUE_REGS) == 0) { -		and	SEQCTL, ~PAUSEDIS; -	} -	test	SSTAT0, SELDO|SELDI	jnz selection; -	test	SCSISEQ, ENSELO	jnz poll_for_work; -	if ((p->features & AHC_TWIN) != 0) { -		/* -		 * Twin channel devices cannot handle things like SELTO -		 * interrupts on the "background" channel.  So, if we -		 * are selecting, keep polling the current channel util -		 * either a selection or reselection occurs. 
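The waiting-list scheme described in the comment above amounts to an intrusive singly linked list kept in byte 27 of each on-chip SCB, terminated by SCB_LIST_NULL (0xff) since valid SCB indexes run 0-0xfe. A C model of the threading, with the link bytes pulled out into an array for clarity (the helpers are illustrative):

    #define SCB_LIST_NULL 0xff

    /* Sketch: push an SCB onto the front of the waiting list. */
    static void waiting_list_push(unsigned char *next_bytes,
                                  unsigned char *head, unsigned char scb)
    {
            next_bytes[scb] = *head;            /* thread in front of head */
            *head = scb;
    }

    /* Sketch: pop the head of the waiting list, as start_waiting does. */
    static unsigned char waiting_list_pop(unsigned char *next_bytes,
                                          unsigned char *head)
    {
            unsigned char scb = *head;

            if (scb != SCB_LIST_NULL)
                    *head = next_bytes[scb];    /* advance to the next entry */
            return scb;
    }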
-		 */ -		xor	SBLKCTL,SELBUSB;	/* Toggle to the other bus */ -		test	SSTAT0, SELDO|SELDI	jnz selection; -		test	SCSISEQ, ENSELO	jnz poll_for_work; -		xor	SBLKCTL,SELBUSB;	/* Toggle back */ -	} -	cmp	WAITING_SCBH,SCB_LIST_NULL jne start_waiting; -test_queue: -	/* Has the driver posted any work for us? */ -	if ((p->features & AHC_QUEUE_REGS) != 0) { -		test	QOFF_CTLSTA, SCB_AVAIL jz poll_for_work_loop; -		mov	NONE, SNSCB_QOFF; -		inc	QINPOS; -	} else { -		or	SEQCTL, PAUSEDIS; -		cmp	KERNEL_QINPOS, A je poll_for_work_loop; -		inc	QINPOS; -		and	SEQCTL, ~PAUSEDIS; -	} - -/* - * We have at least one queued SCB now and we don't have any  - * SCBs in the list of SCBs awaiting selection.  If we have - * any SCBs available for use, pull the tag from the QINFIFO - * and get to work on it. - */ -	if ((p->flags & AHC_PAGESCBS) != 0) { -		mov	ALLZEROS	call	get_free_or_disc_scb; -	} - -dequeue_scb: -	add	A, -1, QINPOS; -	mvi	QINFIFO_OFFSET call fetch_byte; - -	if ((p->flags & AHC_PAGESCBS) == 0) { -		/* In the non-paging case, the SCBID == hardware SCB index */ -		mov	SCBPTR, RETURN_2; -	} -dma_queued_scb: -/* - * DMA the SCB from host ram into the current SCB location. - */ -	mvi	DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; -	mov	RETURN_2	 call dma_scb; - -/* - * Preset the residual fields in case we never go through a data phase. - * This isn't done by the host so we can avoid a DMA to clear these - * fields for the normal case of I/O that completes without underrun - * or overrun conditions. - */ -	if ((p->features & AHC_CMD_CHAN) != 0) { -		bmov    SCB_RESID_DCNT, SCB_DATACNT, 3; -	} else { -		mov     SCB_RESID_DCNT[0],SCB_DATACNT[0]; -		mov     SCB_RESID_DCNT[1],SCB_DATACNT[1]; -		mov     SCB_RESID_DCNT[2],SCB_DATACNT[2]; -	} -	mov     SCB_RESID_SGCNT, SCB_SGCOUNT; - -start_scb: -	/* -	 * Place us on the waiting list in case our selection -	 * doesn't win during bus arbitration. -	 */ -	mov	SCB_NEXT,WAITING_SCBH; -	mov	WAITING_SCBH, SCBPTR; -start_waiting: -	/* -	 * Pull the first entry off of the waiting SCB list. -	 */ -	mov	SCBPTR, WAITING_SCBH; -	call	start_selection; -	jmp	poll_for_work; - -start_selection: -	if ((p->features & AHC_TWIN) != 0) { -		and	SINDEX,~SELBUSB,SBLKCTL;/* Clear channel select bit */ -		and	A,SELBUSB,SCB_TCL;	/* Get new channel bit */ -		or	SINDEX,A; -		mov	SBLKCTL,SINDEX;		/* select channel */ -	} -initialize_scsiid: -	if ((p->features & AHC_ULTRA2) != 0) { -		and	A, TID, SCB_TCL;	/* Get target ID */ -		and	SCSIID_ULTRA2, OID;	/* Clear old target */ -		or	SCSIID_ULTRA2, A; -	} else { -		and	A, TID, SCB_TCL;	/* Get target ID */ -		and	SCSIID, OID;		/* Clear old target */ -		or	SCSIID, A; -	} -	mov	SCSIDATL, ALLZEROS;		/* clear out the latched */ -						/* data register, this */ -						/* fixes a bug on some */ -						/* controllers where the */ -						/* last byte written to */ -						/* this register can leak */ -						/* onto the data bus at */ -						/* bad times, such as during */ -						/* selection timeouts */ -	mvi	SCSISEQ, ENSELO|ENAUTOATNO|ENRSELI|ENAUTOATNP ret; - -/* - * Initialize Ultra mode setting and clear the SCSI channel. - * SINDEX should contain any additional bit's the client wants - * set in SXFRCTL0. 
- */ -initialize_channel: -	or	SXFRCTL0, CLRSTCNT|CLRCHN, SINDEX; -	if ((p->features & AHC_ULTRA) != 0) { -ultra: -		mvi	SINDEX, ULTRA_ENB+1; -		test	SAVED_TCL, 0x80		jnz ultra_2; /* Target ID > 7 */ -		dec	SINDEX; -ultra_2: -		mov     FUNCTION1,SAVED_TCL; -		mov     A,FUNCTION1; -		test	SINDIR, A	jz ndx_dtr; -		or	SXFRCTL0, FAST20; -	}  -/* - * Initialize SCSIRATE with the appropriate value for this target. - * The SCSIRATE settings for each target are stored in an array - * based at TARG_SCSIRATE. - */ -ndx_dtr: -	shr	A,4,SAVED_TCL; -	if ((p->features & AHC_TWIN) != 0) { -		test	SBLKCTL,SELBUSB	jz ndx_dtr_2; -		or	SAVED_TCL, SELBUSB;  -		or	A,0x08;			/* Channel B entries add 8 */ -ndx_dtr_2: -	} - -	if ((p->features & AHC_ULTRA2) != 0) { -		add	SINDEX, TARG_OFFSET, A; -		mov	SCSIOFFSET, SINDIR; -	} - -	add	SINDEX,TARG_SCSIRATE,A; -	mov	SCSIRATE,SINDIR ret; - - -selection: -	test	SSTAT0,SELDO	jnz select_out; -/* - * Reselection has been initiated by a target. Make a note that we've been - * reselected, but haven't seen an IDENTIFY message from the target yet. - */ -initiator_reselect: -	mvi	CLRSINT0, CLRSELDI; -	/* XXX test for and handle ONE BIT condition */ -	and	SAVED_TCL, SELID_MASK, SELID; -	mvi	CLRSINT1,CLRBUSFREE; -	or	SIMODE1, ENBUSFREE;		/* -						 * We aren't expecting a -						 * bus free, so interrupt -						 * the kernel driver if it -						 * happens. -						 */ -	mvi	SPIOEN call	initialize_channel; -	mvi	MSG_OUT, MSG_NOOP;		/* No message to send */ -	jmp	ITloop; - -/* - * After the selection, remove this SCB from the "waiting SCB" - * list.  This is achieved by simply moving our "next" pointer into - * WAITING_SCBH.  Our next pointer will be set to null the next time this - * SCB is used, so don't bother with it now. - */ -select_out: -	/* Turn off the selection hardware */ -	mvi	SCSISEQ, ENRSELI|ENAUTOATNP;	/* -						 * ATN on parity errors -						 * for "in" phases -						 */ -	mvi	CLRSINT0, CLRSELDO; -	mov	SCBPTR, WAITING_SCBH; -	mov	WAITING_SCBH,SCB_NEXT; -	mov	SAVED_TCL, SCB_TCL; -	mvi	CLRSINT1,CLRBUSFREE; -	or	SIMODE1, ENBUSFREE;		/* -						 * We aren't expecting a -						 * bus free, so interrupt -						 * the kernel driver if it -						 * happens. -						 */ -	mvi	SPIOEN call	initialize_channel; -/* - * As soon as we get a successful selection, the target should go - * into the message out phase since we have ATN asserted. - */ -	mvi	MSG_OUT, MSG_IDENTIFYFLAG; -	or	SEQ_FLAGS, IDENTIFY_SEEN; - -/* - * Main loop for information transfer phases.  Wait for the target - * to assert REQ before checking MSG, C/D and I/O for the bus phase. - */ -ITloop: -	call	phase_lock; - -	mov	A, LASTPHASE; - -	test	A, ~P_DATAIN	jz p_data; -	cmp	A,P_COMMAND	je p_command; -	cmp	A,P_MESGOUT	je p_mesgout; -	cmp	A,P_STATUS	je p_status; -	cmp	A,P_MESGIN	je p_mesgin; - -	mvi	INTSTAT,BAD_PHASE;	/* unknown phase - signal driver */ -	jmp	ITloop;			/* Try reading the bus again. */ - -await_busfree: -	and	SIMODE1, ~ENBUSFREE; -	call	clear_target_state; -	mov	NONE, SCSIDATL;		/* Ack the last byte */ -	and	SXFRCTL0, ~SPIOEN; -	test	SSTAT1,REQINIT|BUSFREE	jz .; -	test	SSTAT1, BUSFREE jnz poll_for_work; -	mvi	INTSTAT, BAD_PHASE; -	 -clear_target_state: -	/* -	 * We assume that the kernel driver may reset us -	 * at any time, even in the middle of a DMA, so -	 * clear DFCNTRL too. -	 */ -	clr	DFCNTRL; - -	/* -	 * We don't know the target we will connect to, -	 * so default to narrow transfers to avoid -	 * parity problems. 
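The per-target rate lookup in ndx_dtr above is plain array indexing once the target ID is extracted from the upper nibble of SAVED_TCL. A rough C equivalent, where the table is a stand-in for the scratch-RAM array based at TARG_SCSIRATE and the channel flag is a simplification of the SBLKCTL test:

	/* Hypothetical model of the ndx_dtr index computation. */
	static unsigned char targ_scsirate[16];	/* scratch RAM at TARG_SCSIRATE */

	static unsigned char lookup_scsirate(unsigned char saved_tcl, int on_bus_b)
	{
		unsigned char idx = saved_tcl >> 4;	/* target ID, upper nibble */

		if (on_bus_b)
			idx |= 0x08;			/* channel B entries add 8 */
		return targ_scsirate[idx];
	}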
-	 */ -	if ((p->features & AHC_ULTRA2) != 0) { -		bmov    SCSIRATE, ALLZEROS, 2; -	} else { -		clr     SCSIRATE; -		and     SXFRCTL0, ~(FAST20); -	} -	mvi	LASTPHASE, P_BUSFREE; -	/* clear target specific flags */ -	clr	SEQ_FLAGS ret; - - -data_phase_reinit: -/* - * If we re-enter the data phase after going through another phase, the - * STCNT may have been cleared, so restore it from the residual field. - * On Ultra2, we have to put it into the HCNT field because we have to - * drop the data down into the shadow layer via the preload ability. - */ - 	if ((p->features & AHC_ULTRA2) != 0) { -		bmov	HADDR, SHADDR, 4; -		bmov    HCNT, SCB_RESID_DCNT, 3; -	} -	if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { -		bmov    STCNT, SCB_RESID_DCNT, 3; -	} -	if ((p->features & AHC_CMD_CHAN) == 0) { -		mvi	DINDEX, STCNT; -		mvi	SCB_RESID_DCNT	call bcopy_3; -	} -	jmp	data_phase_loop; -p_data: - 	if ((p->features & AHC_ULTRA2) != 0) { -		mvi	DMAPARAMS, PRELOADEN|SCSIEN|HDMAEN; -	} else { -		mvi	DMAPARAMS, WIDEODD|SCSIEN|SDMAEN|HDMAEN|FIFORESET; -	} -	test	LASTPHASE, IOI jnz . + 2; -	or	DMAPARAMS, DIRECTION; -	call	assert;		/* -				 * Ensure entering a data -				 * phase is okay - seen identify, etc. -				 */ -	if ((p->features & AHC_CMD_CHAN) != 0) { -		mvi	CCSGADDR, CCSGADDR_MAX; -	} - -	test	SEQ_FLAGS, DPHASE	jnz data_phase_reinit; -	or	SEQ_FLAGS, DPHASE;	/* we've seen a data phase */ -	/* -	 * Initialize the DMA address and counter from the SCB. -	 * Also set SG_COUNT and SG_NEXT in memory since we cannot -	 * modify the values in the SCB itself until we see a -	 * save data pointers message. -	 */ -	if ((p->features & AHC_CMD_CHAN) != 0) { -		bmov	HADDR, SCB_DATAPTR, 7; -		bmov    SG_COUNT, SCB_SGCOUNT, 5; -		if ((p->features & AHC_ULTRA2) == 0) { -			bmov    STCNT, HCNT, 3; -		} -	} else { -		mvi	DINDEX, HADDR; -		mvi	SCB_DATAPTR	call bcopy_7; -		call	set_stcnt_from_hcnt; -		mvi	DINDEX, SG_COUNT; -		mvi	SCB_SGCOUNT	call bcopy_5; -	} -data_phase_loop: -	/* Guard against overruns */ -	test	SG_COUNT, 0xff jnz data_phase_inbounds; -/* - * Turn on 'Bit Bucket' mode, set the transfer count to - * 16meg and let the target run until it changes phase. - * When the transfer completes, notify the host that we - * had an overrun. - */ -	or	SXFRCTL1,BITBUCKET; -	and	DMAPARAMS, ~(HDMAEN|SDMAEN); -	if ((p->features & AHC_ULTRA2) != 0) { -		bmov	HCNT, ALLONES, 3; -	} -	if ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895) { -		bmov	STCNT, ALLONES, 3; -	} -	if ((p->features & AHC_CMD_CHAN) == 0) { -		mvi	STCNT[0], 0xFF; -		mvi	STCNT[1], 0xFF; -		mvi	STCNT[2], 0xFF; -	} - -data_phase_inbounds: -/* If we are the last SG block, tell the hardware. */ -	if ((p->features & AHC_ULTRA2) != 0) { -		shl	A, 2, SG_COUNT; -		cmp	SG_COUNT,0x01 jne data_phase_wideodd; -		or	A, LAST_SEG; -	} else { -		cmp	SG_COUNT,0x01 jne data_phase_wideodd; -		and	DMAPARAMS, ~WIDEODD; -	} -data_phase_wideodd: -	if ((p->features & AHC_ULTRA2) != 0) {	 -		mov	SG_CACHEPTR, A; -		mov	DFCNTRL, DMAPARAMS; /* start the operation */ -		test	SXFRCTL1, BITBUCKET jnz data_phase_overrun; -u2_preload_wait: -		test	SSTAT1, PHASEMIS jnz u2_phasemis; -		test	DFSTATUS, PRELOAD_AVAIL jz u2_preload_wait; -	} else { -		mov	DMAPARAMS  call dma; -data_phase_dma_done: -/* Go tell the host about any overruns */ -		test	SXFRCTL1,BITBUCKET jnz data_phase_overrun; - -/* Exit if we had an underrun.  dma clears SINDEX in this case. 
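The overrun guard at data_phase_loop above can be summarized in C as below. The struct and function are hypothetical stand-ins for sequencer state; the mask values match the register definitions later in this patch (BITBUCKET in SXFRCTL1, HDMAEN/SDMAEN in DMAPARAMS).

	#include <stdbool.h>
	#include <stdint.h>

	#define BITBUCKET	0x80	/* SXFRCTL1 */
	#define SDMAEN		0x10	/* DMAPARAMS */
	#define HDMAEN		0x08	/* DMAPARAMS */

	/* Hypothetical device-state model of the overrun guard. */
	struct ahc_state {
		uint8_t  sg_count;	/* models SG_COUNT */
		uint8_t  sxfrctl1;	/* models SXFRCTL1 */
		uint8_t  dmaparams;	/* models DMAPARAMS */
		uint32_t stcnt;		/* models the 24-bit STCNT */
	};

	/* Returns true if the transfer fell into bit-bucket mode. */
	static bool guard_against_overrun(struct ahc_state *s)
	{
		if (s->sg_count != 0)
			return false;			/* still in bounds */
		s->sxfrctl1 |= BITBUCKET;		/* discard incoming data */
		s->dmaparams &= ~(HDMAEN | SDMAEN);	/* no DMA engines */
		s->stcnt = 0xffffff;			/* all ones: let the target
							 * run ~16MB to a phase change */
		return true;
	}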
*/ -		test	SINDEX,0xff	jz data_phase_finish; -	} -/* - * Advance the scatter-gather pointers  - */ -sg_advance: -	if ((p->features & AHC_ULTRA2) != 0) { -		cmp	SG_COUNT, 0x01	je u2_data_phase_finish; -	} else { -		dec	SG_COUNT; -		test	SG_COUNT, 0xff	jz data_phase_finish; -	} - -	if ((p->features & AHC_CMD_CHAN) != 0) { - -		/* -		 * Do we have any prefetch left??? -		 */ -		cmp	CCSGADDR, CCSGADDR_MAX jne prefetch_avail; - -		/* -		 * Fetch MIN(CCSGADDR_MAX, (SG_COUNT * 8)) bytes. -		 */ -		add	A, -(CCSGRAM_MAXSEGS + 1), SG_COUNT; -		mvi	A, CCSGADDR_MAX; -		jc	. + 2; -		shl	A, 3, SG_COUNT; -		mov	CCHCNT, A; -		bmov	CCHADDR, SG_NEXT, 4; -		mvi	CCSGCTL, CCSGEN|CCSGRESET; -		test	CCSGCTL, CCSGDONE jz .; -		and	CCSGCTL, ~CCSGEN; -		test	CCSGCTL, CCSGEN jnz .; -		mvi	CCSGCTL, CCSGRESET; -prefetch_avail: -		bmov 	HADDR, CCSGRAM, 8; -		if ((p->features & AHC_ULTRA2) == 0) { -			bmov    STCNT, HCNT, 3; -		} else { -			dec	SG_COUNT; -		} -	} else { -		mvi	DINDEX, HADDR; -		mvi	SG_NEXT	call bcopy_4; - -		mvi	HCNT[0],SG_SIZEOF; -		clr	HCNT[1]; -		clr	HCNT[2]; - -		or	DFCNTRL, HDMAEN|DIRECTION|FIFORESET; - -		call	dma_finish; - -/* - * Copy data from FIFO into SCB data pointer and data count. - * This assumes that the SG segments are of the form: - * struct ahc_dma_seg { - *	u_int32_t	addr;	four bytes, little-endian order - *	u_int32_t	len;	four bytes, little endian order - * }; - */ - 		mvi	DINDEX, HADDR; -		call	dfdat_in_7; -		call	set_stcnt_from_hcnt; -	} -/* Advance the SG pointer */ -	clr	A;		/* add sizeof(struct scatter) */ -	add	SG_NEXT[0],SG_SIZEOF; -	adc	SG_NEXT[1],A; - -	if ((p->features & AHC_ULTRA2) != 0) { -		jmp	data_phase_loop; -	} else { -		test    SSTAT1, REQINIT jz .; -		test	SSTAT1,PHASEMIS	jz data_phase_loop; -	} - - -/* - * We've loaded all of our segments into the preload layer.  Now, we simply - * have to wait for it to finish or for us to get a phasemis.  And, since - * we'll get a phasemis if we do finish, all we really need to do is wait - * for a phasemis then check if we did actually complete all the segments. - */ -	if ((p->features & AHC_ULTRA2) != 0) { -u2_data_phase_finish: -		test	SSTAT1, PHASEMIS jnz u2_phasemis; -		test	SG_CACHEPTR, LAST_SEG_DONE jz u2_data_phase_finish; -		clr	SG_COUNT; -		test	SSTAT1, REQINIT	jz .; -		test	SSTAT1, PHASEMIS jz data_phase_loop; -u2_phasemis: -		call	ultra2_dmafinish; -		test	SG_CACHEPTR, LAST_SEG_DONE jnz data_phase_finish; -		test	SSTAT2, SHVALID jnz u2_fixup_residual; -		mvi	INTSTAT, SEQ_SG_FIXUP; -		jmp	data_phase_finish; -u2_fixup_residual: -		shr	ARG_1, 2, SG_CACHEPTR; -u2_phasemis_loop: -		and	A, 0x3f, SG_COUNT; -		cmp	ARG_1, A je data_phase_finish; -/* - * Subtract SG_SIZEOF from the SG_NEXT pointer and add 1 to the SG_COUNT - */ - 		clr	A; -		add	SG_NEXT[0], -SG_SIZEOF; -		adc	SG_NEXT[1], 0xff; -		inc	SG_COUNT; -		jmp	u2_phasemis_loop; -	} - -data_phase_finish: -/* - * After a DMA finishes, save the SG and STCNT residuals back into the SCB - * We use STCNT instead of HCNT, since it's a reflection of how many bytes  - * were transferred on the SCSI (as opposed to the host) bus. 
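For reference, the segment layout spelled out in the comment above, together with the MIN(CCSGADDR_MAX, SG_COUNT * 8) prefetch size, looks like this in C (a sketch; the function name is made up):

	#include <stdint.h>

	/* S/G element format assumed by the copy loops above. */
	struct ahc_dma_seg {
		uint32_t addr;	/* four bytes, little-endian order */
		uint32_t len;	/* four bytes, little-endian order */
	};

	/* Bytes to prefetch into S/G RAM: MIN(CCSGADDR_MAX, SG_COUNT * 8). */
	static unsigned int sg_prefetch_len(unsigned int sg_count,
					    unsigned int ccsgaddr_max)
	{
		unsigned int want = sg_count * sizeof(struct ahc_dma_seg);

		return want < ccsgaddr_max ? want : ccsgaddr_max;
	}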
- */ -	if ((p->features & AHC_CMD_CHAN) != 0) { -		bmov    SCB_RESID_DCNT, STCNT, 3; -		mov	SCB_RESID_SGCNT, SG_COUNT; -		if ((p->features & AHC_ULTRA2) != 0) { -			or	SXFRCTL0, CLRSTCNT|CLRCHN; -		} -	} else { -		mov	SCB_RESID_DCNT[0],STCNT[0]; -		mov	SCB_RESID_DCNT[1],STCNT[1]; -		mov	SCB_RESID_DCNT[2],STCNT[2]; -		mov	SCB_RESID_SGCNT, SG_COUNT; -	} - -	jmp	ITloop; - -data_phase_overrun: -/* - * Turn off BITBUCKET mode and notify the host - */ -	if ((p->features & AHC_ULTRA2) != 0) { -/* - * Wait for the target to quit transferring data on the SCSI bus - */ - 		test	SSTAT1, PHASEMIS jz .; -		call	ultra2_dmafinish; -	} -	and	SXFRCTL1, ~BITBUCKET; -	mvi	INTSTAT,DATA_OVERRUN; -	jmp	ITloop; - - - - -/* - * Actually turn off the DMA hardware, save our current position into the - * proper residual variables, wait for the next REQ signal, then jump to - * the ITloop.  Jumping to the ITloop ensures that if we happen to get - * brought into the data phase again (or are still in it after our last - * segment) that we will properly signal an overrun to the kernel. - */ -	if ((p->features & AHC_ULTRA2) != 0) { -ultra2_dmafinish: -		test	DFCNTRL, DIRECTION jnz ultra2_dmahalt; -		and	DFCNTRL, ~SCSIEN; -		test	DFCNTRL, SCSIEN jnz .; -		if ((p->bugs & AHC_BUG_AUTOFLUSH) != 0) { -			or	DFCNTRL, FIFOFLUSH; -		} -ultra2_dmafifoflush: -		if ((p->bugs & AHC_BUG_AUTOFLUSH) != 0) { -			/* -			 * hardware bug alert!  This needless set of jumps -			 * works around a glitch in the silicon.  When the -			 * PCI DMA fifo goes empty, but there is still SCSI -			 * data to be flushed into the PCI DMA fifo (and from -			 * there on into main memory), the FIFOEMP bit will -			 * come on between the time when the PCI DMA buffer -			 * went empty and the next bit of data is copied from -			 * the SCSI fifo into the PCI fifo.  It should only -			 * come on when both FIFOs (meaning the entire FIFO -			 * chain) are empty.  Since it can take up to 4 cycles -			 * for new data to be copied from the SCSI fifo into -			 * the PCI fifo, testing for FIFOEMP status for 4 -			 * extra times gives the needed time for any -			 * remaining SCSI fifo data to be put in the PCI fifo -			 * before we declare it *truly* empty. -			 */ -			test	DFSTATUS, FIFOEMP jz ultra2_dmafifoflush; -			test	DFSTATUS, FIFOEMP jz ultra2_dmafifoflush; -			test	DFSTATUS, FIFOEMP jz ultra2_dmafifoflush; -			test	DFSTATUS, FIFOEMP jz ultra2_dmafifoflush; -		} -		test	DFSTATUS, FIFOEMP jz ultra2_dmafifoflush; -		test	DFSTATUS, MREQPEND	jnz .; -ultra2_dmahalt: -		and     DFCNTRL, ~(HDMAEN|SCSIEN); -		test	DFCNTRL, (HDMAEN|SCSIEN) jnz .; -		ret; -	} - -/* - * Command phase.  Set up the DMA registers and let 'er rip. - */ -p_command: -	call	assert; - -/* - * Load HADDR and HCNT. - */ -	if ((p->features & AHC_CMD_CHAN) != 0) { -		bmov	HADDR, SCB_CMDPTR, 5; -		bmov	HCNT[1], ALLZEROS, 2; -		if ((p->features & AHC_ULTRA2) == 0) { -			bmov	STCNT, HCNT, 3; -		} -	} else { -		mvi	DINDEX, HADDR; -		mvi	SCB_CMDPTR	call bcopy_5; -		clr	HCNT[1]; -		clr	HCNT[2]; -		call	set_stcnt_from_hcnt; -	} - -	if ((p->features & AHC_ULTRA2) == 0) { -		mvi	(SCSIEN|SDMAEN|HDMAEN|DIRECTION|FIFORESET) call dma; -	} else { -		mvi	DFCNTRL, (PRELOADEN|SCSIEN|HDMAEN|DIRECTION); -		test	SSTAT0, SDONE jnz .; -p_command_dma_loop: -		test	SSTAT0, SDONE jnz p_command_ultra2_dma_done; -		test	SSTAT1,PHASEMIS	jz p_command_dma_loop;	/* ie. 
underrun */
-p_command_ultra2_dma_done:
-	test	SCSISIGI, REQI	jz p_command_ultra2_shutdown;
-	test	SSTAT1, (PHASEMIS|REQINIT)	jz p_command_ultra2_dma_done;
-p_command_ultra2_shutdown:
-	and     DFCNTRL, ~(HDMAEN|SCSIEN);
-	test	DFCNTRL, (HDMAEN|SCSIEN) jnz .;
-	or	SXFRCTL0, CLRSTCNT|CLRCHN;
-	}
-	jmp	ITloop;
-
-/*
- * Status phase.  Wait for the data byte to appear, then read it
- * and store it into the SCB.
- */
-p_status:
-	call	assert;
-
-	mov	SCB_TARGET_STATUS, SCSIDATL;
-	jmp	ITloop;
-
-/*
- * Message out phase.  If MSG_OUT is 0x80, build a full identify message
- * sequence and send it to the target.  In addition, if the MK_MESSAGE bit
- * is set in the SCB_CONTROL byte, interrupt the host and allow it to send
- * its own message.
- *
- * If MSG_OUT == HOST_MSG, also interrupt the host and take a message.
- * This is done to allow the host to send messages outside of an identify
- * sequence while protecting the sequencer from testing the MK_MESSAGE bit
- * on an SCB that might not be for the current nexus. (For example, a
- * BDR message in response to a bad reselection would leave us pointed to
- * an SCB that doesn't have anything to do with the current target).
- * Otherwise, treat MSG_OUT as a 1 byte message to send (abort, abort tag,
- * bus device reset).
- *
- * When there are no messages to send, MSG_OUT should be set to MSG_NOOP,
- * in case the target decides to put us in this phase for some strange
- * reason.
- */
-p_mesgout_retry:
-	or      SCSISIGO,ATNO,LASTPHASE;/* turn on ATN for the retry */
-p_mesgout:
-	mov	SINDEX, MSG_OUT;
-	cmp	SINDEX, MSG_IDENTIFYFLAG jne p_mesgout_from_host;
-p_mesgout_identify:
-	if ((p->features & AHC_WIDE) != 0) {
-		and	SINDEX,0xf,SCB_TCL;	/* lun */
-	} else {
-		and	SINDEX,0x7,SCB_TCL;	/* lun */
-	}
-	and	A,DISCENB,SCB_CONTROL;	/* mask off disconnect privilege */
-	or	SINDEX,A;		/* or in disconnect privilege */
-	or	SINDEX,MSG_IDENTIFYFLAG;
-p_mesgout_mk_message:
-	test	SCB_CONTROL,MK_MESSAGE  jz p_mesgout_tag;
-	mov	SCSIDATL, SINDEX;	/* Send the last byte */
-	jmp	p_mesgout_from_host + 1;/* Skip HOST_MSG test */
-/*
- * Send a tag message if TAG_ENB is set in the SCB control block.
- * Use SCB_TAG (the position in the kernel's SCB array) as the tag value.
- */
-p_mesgout_tag:
-	test	SCB_CONTROL,TAG_ENB jz  p_mesgout_onebyte;
-	mov	SCSIDATL, SINDEX;	/* Send the identify message */
-	call	phase_lock;
-	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
-	and	SCSIDATL,TAG_ENB|SCB_TAG_TYPE,SCB_CONTROL;
-	call	phase_lock;
-	cmp	LASTPHASE, P_MESGOUT	jne p_mesgout_done;
-	mov	SCB_TAG	jmp p_mesgout_onebyte;
-/*
- * Interrupt the driver, and allow it to send a message
- * if it asks.
- */
-p_mesgout_from_host:
-	cmp	SINDEX, HOST_MSG	jne p_mesgout_onebyte;
-	mvi     INTSTAT,AWAITING_MSG;
-	nop;
-	/*
-	 * Did the host detect a phase change?
-	 */
-	cmp	RETURN_1, MSGOUT_PHASEMIS je p_mesgout_done;
-
-p_mesgout_onebyte:
-	mvi	CLRSINT1, CLRATNO;
-	mov	SCSIDATL, SINDEX;
-
-/*
- * If the next bus phase after ATN drops is a message out, it means
- * that the target is requesting that the last message(s) be resent.
- */
-	call	phase_lock;
-	cmp     LASTPHASE, P_MESGOUT    je p_mesgout_retry;
-
-p_mesgout_done:
-	mvi	CLRSINT1,CLRATNO;	/* Be sure to turn ATNO off */
-	mov	LAST_MSG, MSG_OUT;
-	cmp	MSG_OUT, MSG_IDENTIFYFLAG jne . + 2;
-	and	SCB_CONTROL, ~MK_MESSAGE;
-	mvi	MSG_OUT, MSG_NOOP;	/* No message left */
-	jmp	ITloop;
-
-/*
- * Message in phase.  Bytes are read using Automatic PIO mode.
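The identify byte assembled in p_mesgout_identify above is just three OR'd fields. A C sketch (the function is hypothetical; the constants match MSG_IDENTIFYFLAG and the DISCENB bit of SCB_CONTROL defined later in this patch):

	#include <stdbool.h>
	#include <stdint.h>

	#define MSG_IDENTIFYFLAG	0x80
	#define DISCENB			0x40	/* disconnect privilege, SCB_CONTROL */

	/* Model of p_mesgout_identify: lun | disconnect privilege | flag. */
	static uint8_t build_identify(uint8_t scb_tcl, uint8_t scb_control,
				      bool wide)
	{
		uint8_t lun = scb_tcl & (wide ? 0x0f : 0x07);

		return MSG_IDENTIFYFLAG | (scb_control & DISCENB) | lun;
	}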
- */ -p_mesgin: -	mvi	ACCUM		call inb_first;	/* read the 1st message byte */ - -	test	A,MSG_IDENTIFYFLAG	jnz mesgin_identify; -	cmp	A,MSG_DISCONNECT	je mesgin_disconnect; -	cmp	A,MSG_SAVEDATAPOINTER	je mesgin_sdptrs; -	cmp	ALLZEROS,A		je mesgin_complete; -	cmp	A,MSG_RESTOREPOINTERS	je mesgin_rdptrs; -	cmp	A,MSG_EXTENDED		je mesgin_extended; -	cmp	A,MSG_MESSAGE_REJECT	je mesgin_reject; -	cmp	A,MSG_NOOP		je mesgin_done; -	cmp	A,MSG_IGN_WIDE_RESIDUE	je mesgin_wide_residue; - -rej_mesgin: -/* - * We have no idea what this message in is, so we issue a message reject - * and hope for the best.  In any case, rejection should be a rare - * occurrence - signal the driver when it happens. - */ -	mvi	INTSTAT,SEND_REJECT;		/* let driver know */ - -	mvi	MSG_MESSAGE_REJECT	call mk_mesg; - -mesgin_done: -	mov	NONE,SCSIDATL;		/*dummy read from latch to ACK*/ -	jmp	ITloop; - - -mesgin_complete: -/* - * We got a "command complete" message, so put the SCB_TAG into the QOUTFIFO, - * and trigger a completion interrupt.  Before doing so, check to see if there - * is a residual or the status byte is something other than STATUS_GOOD (0). - * In either of these conditions, we upload the SCB back to the host so it can - * process this information.  In the case of a non zero status byte, we  - * additionally interrupt the kernel driver synchronously, allowing it to - * decide if sense should be retrieved.  If the kernel driver wishes to request - * sense, it will fill the kernel SCB with a request sense command and set - * RETURN_1 to SEND_SENSE.  If RETURN_1 is set to SEND_SENSE we redownload - * the SCB, and process it as the next command by adding it to the waiting list. - * If the kernel driver does not wish to request sense, it need only clear - * RETURN_1, and the command is allowed to complete normally.  We don't bother - * to post to the QOUTFIFO in the error cases since it would require extra - * work in the kernel driver to ensure that the entry was removed before the - * command complete code tried processing it. - */ - -/* - * First check for residuals - */ -	test	SCB_RESID_SGCNT,0xff	jnz upload_scb; -	test	SCB_TARGET_STATUS,0xff	jz complete;	/* Good Status? */ -upload_scb: -	mvi	DMAPARAMS, FIFORESET; -	mov	SCB_TAG		call dma_scb; -check_status: -	test	SCB_TARGET_STATUS,0xff	jz complete;	/* Just a residual? */ -	mvi	INTSTAT,BAD_STATUS;			/* let driver know */ -	nop; -	cmp	RETURN_1, SEND_SENSE	jne complete; -	/* This SCB becomes the next to execute as it will retrieve sense */ -	mvi	DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; -	mov	SCB_TAG		call dma_scb; -add_to_waiting_list: -	mov	SCB_NEXT,WAITING_SCBH; -	mov	WAITING_SCBH, SCBPTR; -	/* -	 * Prepare our selection hardware before the busfree so we have a -	 * high probability of winning arbitration. -	 */ -	call	start_selection; -	jmp	await_busfree; - -complete: -	/* If we are untagged, clear our address up in host ram */ -	test	SCB_CONTROL, TAG_ENB jnz complete_post; -	mov	A, SAVED_TCL; -	mvi	UNTAGGEDSCB_OFFSET call post_byte_setup; -	mvi	SCB_LIST_NULL call post_byte; - -complete_post: -	/* Post the SCB and issue an interrupt */ -	if ((p->features & AHC_QUEUE_REGS) != 0) { -		mov	A, SDSCB_QOFF; -	} else { -		mov	A, QOUTPOS; -	} -	mvi	QOUTFIFO_OFFSET call post_byte_setup; -	mov	SCB_TAG call post_byte; -	if ((p->features & AHC_QUEUE_REGS) == 0) { -		inc 	QOUTPOS; -	} -	mvi	INTSTAT,CMDCMPLT; - -add_to_free_list: -	call	add_scb_to_free_list; -	jmp	await_busfree; - -/* - * Is it an extended message?  Copy the message to our message buffer and - * notify the host.  
The host will tell us whether to reject this message, - * respond to it with the message that the host placed in our message buffer, - * or simply to do nothing. - */ -mesgin_extended: -	mvi	INTSTAT,EXTENDED_MSG;		/* let driver know */ -	jmp	ITloop; - -/* - * Is it a disconnect message?  Set a flag in the SCB to remind us - * and await the bus going free. - */ -mesgin_disconnect: -	or	SCB_CONTROL,DISCONNECTED; -	call	add_scb_to_disc_list; -	jmp	await_busfree; - -/* - * Save data pointers message: - * Copying RAM values back to SCB, for Save Data Pointers message, but - * only if we've actually been into a data phase to change them.  This - * protects against bogus data in scratch ram and the residual counts - * since they are only initialized when we go into data_in or data_out. - */ -mesgin_sdptrs: -	test	SEQ_FLAGS, DPHASE	jz mesgin_done; -	/* -	 * The SCB SGPTR becomes the next one we'll download, -	 * and the SCB DATAPTR becomes the current SHADDR. -	 * Use the residual number since STCNT is corrupted by -	 * any message transfer. -	 */ -	if ((p->features & AHC_CMD_CHAN) != 0) { -		bmov    SCB_SGCOUNT, SG_COUNT, 5; -		bmov    SCB_DATAPTR, SHADDR, 4; -		bmov    SCB_DATACNT, SCB_RESID_DCNT, 3; -	} else { -		mvi	DINDEX, SCB_SGCOUNT; -		mvi	SG_COUNT	call bcopy_5; -		mvi	DINDEX, SCB_DATAPTR; -		mvi	SHADDR		call bcopy_4; -		mvi	SCB_RESID_DCNT	call	bcopy_3; -	} -	jmp	mesgin_done; - -/* - * Restore pointers message?  Data pointers are recopied from the - * SCB anytime we enter a data phase for the first time, so all - * we need to do is clear the DPHASE flag and let the data phase - * code do the rest. - */ -mesgin_rdptrs: -	and	SEQ_FLAGS, ~DPHASE;		/* -						 * We'll reload them -						 * the next time through -						 * the dataphase. -						 */ -	jmp	mesgin_done; - -/* - * Identify message?  For a reconnecting target, this tells us the lun - * that the reconnection is for - find the correct SCB and switch to it, - * clearing the "disconnected" bit so we don't "find" it by accident later. - */ -mesgin_identify: -	 -	if ((p->features & AHC_WIDE) != 0) { -		and	A,0x0f;		/* lun in lower four bits */ -	} else { -		and	A,0x07;		/* lun in lower three bits */ -	} -	or      SAVED_TCL,A;		/* SAVED_TCL should be complete now */ - -	mvi     ARG_2, SCB_LIST_NULL;   /* SCBID of prev SCB in disc List */ -	call	get_untagged_SCBID; -	cmp	ARG_1, SCB_LIST_NULL	je snoop_tag; -	if ((p->flags & AHC_PAGESCBS) != 0) { -		test	SEQ_FLAGS, SCBPTR_VALID	jz use_retrieveSCB; -	} -	/* -	 * If the SCB was found in the disconnected list (as is -	 * always the case in non-paging scenarios), SCBPTR is already -	 * set to the correct SCB.  So, simply setup the SCB and get -	 * on with things. -	 */ -	mov	SCBPTR	call rem_scb_from_disc_list; -	jmp	setup_SCB; -/* - * Here we "snoop" the bus looking for a SIMPLE QUEUE TAG message. - * If we get one, we use the tag returned to find the proper - * SCB.  With SCB paging, this requires using search for both tagged - * and non-tagged transactions since the SCB may exist in any slot. - * If we're not using SCB paging, we can use the tag as the direct - * index to the SCB. 
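The paging distinction in the comment above reduces to this: without SCB paging the tag indexes the hardware SCB array directly, while with paging a list search (and possibly a fetch from host memory) is required. A schematic C version, with a hypothetical search callback:

	#include <stdbool.h>

	#define SCB_LIST_NULL	0xff

	/* Schematic reconnection lookup; search() may return SCB_LIST_NULL. */
	static unsigned char resolve_reselection(unsigned char tag, bool paging,
						 unsigned char (*search)(unsigned char))
	{
		if (!paging)
			return tag;	/* tag is the direct hardware SCB index */
		return search(tag);	/* walk disconnected list, else ask host */
	}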
- */
-snoop_tag:
-	mov	NONE,SCSIDATL;		/* ACK Identify MSG */
-snoop_tag_loop:
-	call	phase_lock;
-	cmp	LASTPHASE, P_MESGIN	jne not_found;
-	cmp	SCSIBUSL,MSG_SIMPLE_Q_TAG jne not_found;
-get_tag:
-	mvi	ARG_1	call inb_next;	/* tag value */
-
-use_retrieveSCB:
-	call	retrieveSCB;
-setup_SCB:
-	mov	A, SAVED_TCL;
-	cmp	SCB_TCL, A	jne not_found_cleanup_scb;
-	test	SCB_CONTROL,DISCONNECTED jz not_found_cleanup_scb;
-	and	SCB_CONTROL,~DISCONNECTED;
-	or	SEQ_FLAGS,IDENTIFY_SEEN;	  /* make note of IDENTIFY */
-	/* See if the host wants to send a message upon reconnection */
-	test	SCB_CONTROL, MK_MESSAGE jz mesgin_done;
-	and	SCB_CONTROL, ~MK_MESSAGE;
-	mvi	HOST_MSG	call mk_mesg;
-	jmp	mesgin_done;
-
-not_found_cleanup_scb:
-	test	SCB_CONTROL, DISCONNECTED jz . + 3;
-	call	add_scb_to_disc_list;
-	jmp	not_found;
-	call	add_scb_to_free_list;
-not_found:
-	mvi	INTSTAT, NO_MATCH;
-	mvi	MSG_BUS_DEV_RESET	call mk_mesg;
-	jmp	mesgin_done;
-
-/*
- * Message reject?  Let the kernel driver handle this.  If we have an
- * outstanding WDTR or SDTR negotiation, assume that it's a response from
- * the target selecting 8bit or asynchronous transfer, otherwise just ignore
- * it since we have no clue what it pertains to.
- */
-mesgin_reject:
-	mvi	INTSTAT, REJECT_MSG;
-	jmp	mesgin_done;
-
-/*
- * Wide Residue.  We handle the simple cases, but pass off the one hard case
- * to the kernel (when the residue byte happened to cause us to advance our
- * sg element array, so we now have to back that advance out).
- */
-mesgin_wide_residue:
-	mvi	ARG_1	call inb_next; /* ACK the wide_residue and get */
-				       /* the size byte */
-/*
- * In order for this to be reliable, we have to do all sorts of horrible
- * magic in terms of resetting the datafifo and reloading the shadow layer
- * with the correct new values (so that a subsequent save data pointers
- * message will do the right thing).  We let the kernel do that work.
- */
- 	mvi	INTSTAT, WIDE_RESIDUE;
-	jmp	mesgin_done;
-	
-/*
- * [ ADD MORE MESSAGE HANDLING HERE ]
- */
-
-/*
- * Locking the driver out, build a one-byte message passed in SINDEX
- * if there is no active message already.  SINDEX is returned intact.
- */
-mk_mesg:
-	or	SCSISIGO,ATNO,LASTPHASE;/* turn on ATNO */
-	mov	MSG_OUT,SINDEX ret;
-
-/*
- * Functions to read data in Automatic PIO mode.
- *
- * According to Adaptec's documentation, an ACK is not sent on input from
- * the target until SCSIDATL is read from.  So we wait until SCSIDATL is
- * latched (the usual way), then read the data byte directly off the bus
- * using SCSIBUSL.  When we have pulled the ATN line, or we just want to
- * acknowledge the byte, then we do a dummy read from SCSIDATL.  The SCSI
- * spec guarantees that the target will hold the data byte on the bus until
- * we send our ACK.
- *
- * The assumption here is that these are called in a particular sequence,
- * and that REQ is already set when inb_first is called.  inb_{first,next}
- * use the same calling convention as inb.
- */
-
-inb_next:
-	mov	NONE,SCSIDATL;		/*dummy read from latch to ACK*/
-inb_next_wait:
-	/*
-	 * If there is a parity error, wait for the kernel to
-	 * see the interrupt and prepare our message response
-	 * before continuing.
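The automatic PIO handshake described above maps onto two tiny helpers in C. read_reg is a hypothetical accessor; the register offsets and the REQINIT bit match the definitions later in this patch.

	#include <stdint.h>

	#define SSTAT1		0x0c
	#define SCSIBUSL	0x12
	#define SCSIDATL	0x06
	#define REQINIT		0x01

	extern uint8_t read_reg(unsigned int reg);	/* hypothetical accessor */

	/* inb_first equivalent: REQ is already set, read straight off the bus. */
	static uint8_t pio_in_first(void)
	{
		return read_reg(SCSIBUSL);		/* no ACK generated here */
	}

	/* inb_next equivalent: ACK the previous byte, wait for the next REQ. */
	static uint8_t pio_in_next(void)
	{
		(void)read_reg(SCSIDATL);		/* dummy latch read sends ACK */
		while (!(read_reg(SSTAT1) & REQINIT))
			;				/* spin until target asserts REQ */
		return read_reg(SCSIBUSL);
	}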
-	 */ -	test	SSTAT1, REQINIT	jz inb_next_wait; -	test	SSTAT1, SCSIPERR jnz .; -	and	LASTPHASE, PHASE_MASK, SCSISIGI; -	cmp	LASTPHASE, P_MESGIN jne mesgin_phasemis; -inb_first: -	mov	DINDEX,SINDEX; -	mov	DINDIR,SCSIBUSL	ret;		/*read byte directly from bus*/ -inb_last: -	mov	NONE,SCSIDATL ret;		/*dummy read from latch to ACK*/ - -	 -mesgin_phasemis: -/* - * We expected to receive another byte, but the target changed phase - */ -	mvi	INTSTAT, MSGIN_PHASEMIS; -	jmp	ITloop; - -/* - * DMA data transfer.  HADDR and HCNT must be loaded first, and - * SINDEX should contain the value to load DFCNTRL with - 0x3d for - * host->scsi, or 0x39 for scsi->host.  The SCSI channel is cleared - * during initialization. - */ -if ((p->features & AHC_ULTRA2) == 0) { -dma: -	mov	DFCNTRL,SINDEX; -dma_loop: -	test	SSTAT0,DMADONE	jnz dma_dmadone; -	test	SSTAT1,PHASEMIS	jz dma_loop;	/* ie. underrun */ -dma_phasemis: -	test	SSTAT0,SDONE	jnz dma_checkfifo; -	mov	SINDEX,ALLZEROS;		/* Notify caller of phasemiss */ - -/* - * We will be "done" DMAing when the transfer count goes to zero, or - * the target changes the phase (in light of this, it makes sense that - * the DMA circuitry doesn't ACK when PHASEMIS is active).  If we are - * doing a SCSI->Host transfer, the data FIFO should be flushed auto- - * magically on STCNT=0 or a phase change, so just wait for FIFO empty - * status. - */ -dma_checkfifo: -	test	DFCNTRL,DIRECTION	jnz dma_fifoempty; -dma_fifoflush: -	test	DFSTATUS,FIFOEMP	jz dma_fifoflush; - -dma_fifoempty: -	/* Don't clobber an inprogress host data transfer */ -	test	DFSTATUS, MREQPEND	jnz dma_fifoempty; -/* - * Now shut the DMA enables off and make sure that the DMA enables are  - * actually off first lest we get an ILLSADDR. - */ -dma_dmadone: -	cmp	LASTPHASE, P_COMMAND	je dma_await_nreq; -	test	SCSIRATE, 0x0f	jnz dma_shutdown; -dma_await_nreq: -	test	SCSISIGI, REQI	jz dma_shutdown; -	test	SSTAT1, (PHASEMIS|REQINIT)	jz dma_await_nreq; -dma_shutdown: -	and	DFCNTRL, ~(SCSIEN|SDMAEN|HDMAEN); -dma_halt: -	/* -	 * Some revisions of the aic7880 have a problem where, if the -	 * data fifo is full, but the PCI input latch is not empty,  -	 * HDMAEN cannot be cleared.  The fix used here is to attempt -	 * to drain the data fifo until there is space for the input -	 * latch to drain and HDMAEN de-asserts. -	 */ -	if ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0) { -		mov	NONE, DFDAT; -	} -	test	DFCNTRL, (SCSIEN|SDMAEN|HDMAEN) jnz dma_halt; -} -return: -	ret; - -/* - * Assert that if we've been reselected, then we've seen an IDENTIFY - * message. - */ -assert: -	test	SEQ_FLAGS,IDENTIFY_SEEN	jnz return;	/* seen IDENTIFY? */ - -	mvi	INTSTAT,NO_IDENT 	ret;	/* no - tell the kernel */ - -/* - * Locate a disconnected SCB either by SAVED_TCL (ARG_1 is SCB_LIST_NULL) - * or by the SCBID ARG_1.  The search begins at the SCB index passed in - * via SINDEX which is an SCB that must be on the disconnected list.  If - * the SCB cannot be found, SINDEX will be SCB_LIST_NULL, otherwise, SCBPTR - * is set to the proper SCB. 
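findSCB, as described above, is a linear walk of the singly linked disconnected list, keyed on the tag when one is supplied and on the TCL otherwise. In C terms, roughly (struct and names are illustrative):

	#define SCB_LIST_NULL	0xff

	struct scb {
		unsigned char tcl;	/* SCB_TCL */
		unsigned char tag;	/* SCB_TAG */
		unsigned char next;	/* SCB_NEXT */
	};

	/* Walk the list from head; match on tag if valid, else on TCL. */
	static unsigned char find_scb(struct scb *scbs, unsigned char head,
				      unsigned char tag, unsigned char saved_tcl)
	{
		unsigned char key = (tag != SCB_LIST_NULL) ? tag : saved_tcl;

		for (; head != SCB_LIST_NULL; head = scbs[head].next) {
			unsigned char val = (tag != SCB_LIST_NULL)
					  ? scbs[head].tag : scbs[head].tcl;

			if (val == key)
				return head;	/* SCBPTR analogue */
		}
		return SCB_LIST_NULL;		/* not found */
	}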
- */ -findSCB: -	mov	SCBPTR,SINDEX;			/* Initialize SCBPTR */ -	cmp	ARG_1, SCB_LIST_NULL	jne findSCB_by_SCBID; -	mov	A, SAVED_TCL; -	mvi	SCB_TCL	jmp findSCB_loop;	/* &SCB_TCL -> SINDEX */ -findSCB_by_SCBID: -	mov	A, ARG_1;			/* Tag passed in ARG_1 */ -	mvi	SCB_TAG	jmp findSCB_loop;	/* &SCB_TAG -> SINDEX */ -findSCB_next: -	mov     ARG_2, SCBPTR; -	cmp	SCB_NEXT, SCB_LIST_NULL je notFound; -	mov	SCBPTR,SCB_NEXT; -	dec	SINDEX;		/* Last comparison moved us too far */ -findSCB_loop: -	cmp	SINDIR, A	jne findSCB_next; -	mov	SINDEX, SCBPTR 	ret; -notFound: -	mvi	SINDEX, SCB_LIST_NULL	ret; - -/* - * Retrieve an SCB by SCBID first searching the disconnected list falling - * back to DMA'ing the SCB down from the host.  This routine assumes that - * ARG_1 is the SCBID of interest and that SINDEX is the position in the - * disconnected list to start the search from.  If SINDEX is SCB_LIST_NULL, - * we go directly to the host for the SCB. - */ -retrieveSCB: -	test	SEQ_FLAGS, SCBPTR_VALID	jz retrieve_from_host; -	mov	SCBPTR	call findSCB;	/* Continue the search */ -	cmp	SINDEX, SCB_LIST_NULL	je retrieve_from_host; - -/* - * This routine expects SINDEX to contain the index of the SCB to be - * removed, SCBPTR to be pointing to that SCB, and ARG_2 to be the - * SCBID of the SCB just previous to this one in the list or SCB_LIST_NULL - * if it is at the head. - */ -rem_scb_from_disc_list: -/* Remove this SCB from the disconnection list */ -	cmp     ARG_2, SCB_LIST_NULL    je rHead; -	mov	DINDEX, SCB_NEXT; -	mov	SCBPTR, ARG_2; -	mov	SCB_NEXT, DINDEX; -	mov	SCBPTR, SINDEX ret; -rHead: -	mov	DISCONNECTED_SCBH,SCB_NEXT ret; - -retrieve_from_host: -/* - * We didn't find it.  Pull an SCB and DMA down the one we want. - * We should never get here in the non-paging case. - */ -	mov	ALLZEROS	call	get_free_or_disc_scb; -	mvi	DMAPARAMS, HDMAEN|DIRECTION|FIFORESET; -	/* Jump instead of call as we want to return anyway */ -	mov	ARG_1	jmp dma_scb; - -/* - * Determine whether a target is using tagged or non-tagged transactions - * by first looking for a matching transaction based on the TCL and if - * that fails, looking up this device in the host's untagged SCB array. - * The TCL to search for is assumed to be in SAVED_TCL.  The value is - * returned in ARG_1 (SCB_LIST_NULL for tagged, SCBID for non-tagged). - * The SCBPTR_VALID bit is set in SEQ_FLAGS if we found the information - * in an SCB instead of having to go to the host. - */ -get_untagged_SCBID: -	cmp	DISCONNECTED_SCBH, SCB_LIST_NULL je get_SCBID_from_host; -	mvi	ARG_1, SCB_LIST_NULL; -	mov	DISCONNECTED_SCBH call findSCB; -	cmp	SINDEX, SCB_LIST_NULL	je get_SCBID_from_host; -	or	SEQ_FLAGS, SCBPTR_VALID;/* Was in disconnected list */ -	test	SCB_CONTROL, TAG_ENB	jnz . + 2; -	mov	ARG_1, SCB_TAG	ret; -	mvi	ARG_1, SCB_LIST_NULL ret; - -/* - * Fetch a byte from host memory given an index of (A + (256 * SINDEX)) - * and a base address of SCBID_ADDR.  The byte is returned in RETURN_2. 
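The index arithmetic referred to here is simply base + A + 256 * SINDEX, which set_1byte_addr computes byte-wise with carries; as a one-line C sketch (hypothetical function):

	#include <stdint.h>

	/* Host address used by fetch_byte/post_byte_setup via set_1byte_addr. */
	static uint32_t byte_addr(uint32_t scbid_addr, uint8_t a, uint8_t sindex)
	{
		return scbid_addr + a + 256u * (uint32_t)sindex;
	}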
- */ -fetch_byte: -	mov	ARG_2, SINDEX; -	if ((p->features & AHC_CMD_CHAN) != 0) { -		mvi	DINDEX, CCHADDR; -		mvi	SCBID_ADDR call set_1byte_addr; -		mvi	CCHCNT, 1; -		mvi	CCSGCTL, CCSGEN|CCSGRESET; -		test	CCSGCTL, CCSGDONE jz .; -		mvi	CCSGCTL, CCSGRESET; -		bmov	RETURN_2, CCSGRAM, 1 ret; -	} else { -		mvi	DINDEX, HADDR; -		mvi	SCBID_ADDR call set_1byte_addr; -		mvi	HCNT[0], 1; -		clr	HCNT[1]; -		clr	HCNT[2]; -		mvi	DFCNTRL, HDMAEN|DIRECTION|FIFORESET; -		call	dma_finish; -		mov	RETURN_2, DFDAT ret; -	} - -/* - * Prepare the hardware to post a byte to host memory given an - * index of (A + (256 * SINDEX)) and a base address of SCBID_ADDR. - */ -post_byte_setup: -	mov	ARG_2, SINDEX; -	if ((p->features & AHC_CMD_CHAN) != 0) { -		mvi	DINDEX, CCHADDR; -		mvi	SCBID_ADDR call	set_1byte_addr; -		mvi	CCHCNT, 1; -		mvi	CCSCBCTL, CCSCBRESET ret; -	} else { -		mvi	DINDEX, HADDR; -		mvi	SCBID_ADDR call	set_1byte_addr; -		mvi	HCNT[0], 1; -		clr	HCNT[1]; -		clr	HCNT[2]; -		mvi	DFCNTRL, FIFORESET ret; -	} - -post_byte: -	if ((p->features & AHC_CMD_CHAN) != 0) { -		bmov	CCSCBRAM, SINDEX, 1; -		or	CCSCBCTL, CCSCBEN|CCSCBRESET; -		test	CCSCBCTL, CCSCBDONE jz .; -		clr	CCSCBCTL ret; -	} else { -		mov	DFDAT, SINDEX; -		or	DFCNTRL, HDMAEN|FIFOFLUSH; -		jmp	dma_finish; -	} - -get_SCBID_from_host: -	mov	A, SAVED_TCL; -	mvi	UNTAGGEDSCB_OFFSET call fetch_byte; -	mov	RETURN_1,  RETURN_2 ret; - -phase_lock:      -	test	SSTAT1, REQINIT jz phase_lock; -	test	SSTAT1, SCSIPERR jnz phase_lock; -	and	SCSISIGO, PHASE_MASK, SCSISIGI; -	and	LASTPHASE, PHASE_MASK, SCSISIGI ret; - -if ((p->features & AHC_CMD_CHAN) == 0) { -set_stcnt_from_hcnt: -	mov	STCNT[0], HCNT[0]; -	mov	STCNT[1], HCNT[1]; -	mov	STCNT[2], HCNT[2] ret; - -bcopy_7: -	mov	DINDIR, SINDIR; -	mov	DINDIR, SINDIR; -bcopy_5: -	mov	DINDIR, SINDIR; -bcopy_4: -	mov	DINDIR, SINDIR; -bcopy_3: -	mov	DINDIR, SINDIR; -	mov	DINDIR, SINDIR; -	mov	DINDIR, SINDIR ret; -} - -/* - * Setup addr assuming that A is an index into - * an array of 32byte objects, SINDEX contains - * the base address of that array, and DINDEX - * contains the base address of the location - * to store the indexed address. - */ -set_32byte_addr: -	shr	ARG_2, 3, A; -	shl	A, 5; -/* - * Setup addr assuming that A + (ARG_1 * 256) is an - * index into an array of 1byte objects, SINDEX contains - * the base address of that array, and DINDEX contains - * the base address of the location to store the computed - * address. - */ -set_1byte_addr: -	add	DINDIR, A, SINDIR; -	mov	A, ARG_2; -	adc	DINDIR, A, SINDIR; -	clr	A; -	adc	DINDIR, A, SINDIR; -	adc	DINDIR, A, SINDIR ret; - -/* - * Either post or fetch and SCB from host memory based on the - * DIRECTION bit in DMAPARAMS. The host SCB index is in SINDEX. 
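Likewise, set_32byte_addr (used by dma_scb below) turns an SCB index into a host address by scaling by the 32-byte hardware SCB size; a hedged C equivalent:

	#include <stdint.h>

	/* Host address of hardware SCB slot n: HSCB_ADDR base + 32 * n. */
	static uint32_t hscb_addr(uint32_t hscb_base, uint8_t scb_index)
	{
		return hscb_base + 32u * (uint32_t)scb_index;
	}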
- */ -dma_scb: -	mov	A, SINDEX; -	if ((p->features & AHC_CMD_CHAN) != 0) { -		mvi	DINDEX, CCHADDR; -		mvi	HSCB_ADDR call set_32byte_addr; -		mov	CCSCBPTR, SCBPTR; -		mvi	CCHCNT, 32; -		test	DMAPARAMS, DIRECTION jz dma_scb_tohost; -		mvi	CCSCBCTL, CCARREN|CCSCBEN|CCSCBDIR|CCSCBRESET; -		cmp	CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN|CCSCBDIR jne .; -		jmp	dma_scb_finish; -dma_scb_tohost: -		if ((p->features & AHC_ULTRA2) == 0) { -			mvi	CCSCBCTL, CCSCBRESET; -			bmov	CCSCBRAM, SCB_CONTROL, 32; -			or	CCSCBCTL, CCSCBEN|CCSCBRESET; -			test	CCSCBCTL, CCSCBDONE jz .; -		} -		if ((p->features & AHC_ULTRA2) != 0) { -			if ((p->bugs & AHC_BUG_SCBCHAN_UPLOAD) != 0) { -				mvi     CCSCBCTL, CCARREN|CCSCBRESET; -				cmp     CCSCBCTL, ARRDONE|CCARREN jne .; -                        	mvi     CCHCNT, 32; -				mvi     CCSCBCTL, CCSCBEN|CCSCBRESET; -				cmp     CCSCBCTL, CCSCBDONE|CCSCBEN jne .; -			} else { -				mvi	CCSCBCTL, CCARREN|CCSCBEN|CCSCBRESET; -				cmp	CCSCBCTL, CCSCBDONE|ARRDONE|CCARREN|CCSCBEN jne .; -			} -		} -dma_scb_finish: -		clr	CCSCBCTL; -		test	CCSCBCTL, CCARREN|CCSCBEN jnz .; -		ret; -	} -	if ((p->features & AHC_CMD_CHAN) == 0) { -		mvi	DINDEX, HADDR; -		mvi	HSCB_ADDR call set_32byte_addr; -		mvi	HCNT[0], 32; -		clr	HCNT[1]; -		clr	HCNT[2]; -		mov	DFCNTRL, DMAPARAMS; -		test	DMAPARAMS, DIRECTION	jnz dma_scb_fromhost; -		/* Fill it with the SCB data */ -copy_scb_tofifo: -		mvi	SINDEX, SCB_CONTROL; -		add	A, 32, SINDEX; -copy_scb_tofifo_loop: -		mov	DFDAT,SINDIR; -		mov	DFDAT,SINDIR; -		mov	DFDAT,SINDIR; -		mov	DFDAT,SINDIR; -		mov	DFDAT,SINDIR; -		mov	DFDAT,SINDIR; -		mov	DFDAT,SINDIR; -		mov	DFDAT,SINDIR; -		cmp	SINDEX, A jne copy_scb_tofifo_loop; -		or	DFCNTRL, HDMAEN|FIFOFLUSH; -		jmp	dma_finish; -dma_scb_fromhost: -		mvi	DINDEX, SCB_CONTROL; -		if ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0) { -			/* -			 * Set the A to -24.  It it hits 0, then we let -			 * our code fall through to dfdat_in_8 to complete -			 * the last of the copy. -			 * -			 * Also, things happen 8 bytes at a time in this -			 * case, so we may need to drain the fifo at most -			 * 3 times to keep things flowing -			 */ -			mvi	A, -24; -dma_scb_hang_fifo: -			/* Wait for the first bit of data to hit the fifo */ -			test	DFSTATUS, FIFOEMP jnz .; -dma_scb_hang_wait: -			/* OK, now they've started to transfer into the fifo, -			 * so wait for them to stop trying to transfer any -			 * more data. -			 */ -			test	DFSTATUS, MREQPEND jnz .; -			/* -			 * OK, they started, then they stopped, now see if they -			 * managed to complete the job before stopping.  Try -			 * it multiple times to give the chip a few cycles to -			 * set the flag if it did complete. -			 */ -			test	DFSTATUS, HDONE jnz dma_scb_hang_dma_done; -			test	DFSTATUS, HDONE jnz dma_scb_hang_dma_done; -			test	DFSTATUS, HDONE jnz dma_scb_hang_dma_done; -			/* -			 * Too bad, the chip didn't complete the DMA, but there -			 * aren't any more memory requests pending, so that -			 * means it stopped part way through and hung.  That's -			 * our bug, so now we drain what data there is in the -			 * fifo in order to get things going again. -			 */ -dma_scb_hang_empty_fifo: -			call	dfdat_in_8; -			add	A, 8; -			add	SINDEX, A, HCNT; -			/* -			 * If there are another 8 bytes of data waiting in the -			 * fifo, then the carry bit will be set as a result -			 * of the above add command (unless A is non-negative, -			 * in which case the carry bit won't be set). 
-			 */ -			jc	dma_scb_hang_empty_fifo; -			/* -			 * We've emptied the fifo now, but we wouldn't have got -			 * here if the memory transfer hadn't stopped part way -			 * through, so go back up to the beginning of the -			 * loop and start over.  When it succeeds in getting -			 * all the data down, HDONE will be set and we'll -			 * jump to the code just below here. -			 */ -			jmp	dma_scb_hang_fifo; -dma_scb_hang_dma_done: -			and	DFCNTRL, ~HDMAEN; -			test	DFCNTRL, HDMAEN jnz .; -			call	dfdat_in_8; -			add	A, 8; -			cmp	A, 8 jne . - 2; -			ret; -		} else { -			call	dma_finish; -			call	dfdat_in_8; -			call	dfdat_in_8; -			call	dfdat_in_8; -		} -dfdat_in_8: -		mov	DINDIR,DFDAT; -dfdat_in_7: -		mov	DINDIR,DFDAT; -		mov	DINDIR,DFDAT; -		mov	DINDIR,DFDAT; -		mov	DINDIR,DFDAT; -		mov	DINDIR,DFDAT; -		mov	DINDIR,DFDAT; -		mov	DINDIR,DFDAT ret; -	} - - -/* - * Wait for DMA from host memory to data FIFO to complete, then disable - * DMA and wait for it to acknowledge that it's off. - */ -if ((p->features & AHC_CMD_CHAN) == 0) { -dma_finish: -	test	DFSTATUS,HDONE	jz dma_finish; -	/* Turn off DMA */ -	and	DFCNTRL, ~HDMAEN; -	test	DFCNTRL, HDMAEN jnz .; -	ret; -} - -add_scb_to_free_list: -	if ((p->flags & AHC_PAGESCBS) != 0) { -		mov	SCB_NEXT, FREE_SCBH; -		mov	FREE_SCBH, SCBPTR; -	} -	mvi	SCB_TAG, SCB_LIST_NULL ret; - -if ((p->flags & AHC_PAGESCBS) != 0) { -get_free_or_disc_scb: -	cmp	FREE_SCBH, SCB_LIST_NULL jne dequeue_free_scb; -	cmp	DISCONNECTED_SCBH, SCB_LIST_NULL jne dequeue_disc_scb; -return_error: -	mvi	SINDEX, SCB_LIST_NULL	ret; -dequeue_disc_scb: -	mov	SCBPTR, DISCONNECTED_SCBH; -dma_up_scb: -	mvi	DMAPARAMS, FIFORESET; -	mov	SCB_TAG		call dma_scb; -unlink_disc_scb: -	mov	DISCONNECTED_SCBH, SCB_NEXT ret; -dequeue_free_scb: -	mov	SCBPTR, FREE_SCBH; -	mov	FREE_SCBH, SCB_NEXT ret; -} - -add_scb_to_disc_list: -/* - * Link this SCB into the DISCONNECTED list.  This list holds the - * candidates for paging out an SCB if one is needed for a new command. - * Modifying the disconnected list is a critical(pause dissabled) section. - */ -	mov	SCB_NEXT, DISCONNECTED_SCBH; -	mov	DISCONNECTED_SCBH, SCBPTR ret; diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c b/drivers/scsi/aic7xxx_old/aic7xxx_proc.c deleted file mode 100644 index 976f45ccf2c..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx_proc.c +++ /dev/null @@ -1,270 +0,0 @@ -/*+M************************************************************************* - * Adaptec AIC7xxx device driver proc support for Linux. - * - * Copyright (c) 1995, 1996 Dean W. Gehnert - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; see the file COPYING.  If not, write to - * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. - * - * ---------------------------------------------------------------- - *  o Modified from the EATA-DMA /proc support. - *  o Additional support for device block statistics provided by - *    Matthew Jacob. 
- *  o Correction of overflow by Heinz Mauelshagen - *  o Adittional corrections by Doug Ledford - * - *  Dean W. Gehnert, deang@teleport.com, 05/01/96 - * - *  $Id: aic7xxx_proc.c,v 4.1 1997/06/97 08:23:42 deang Exp $ - *-M*************************************************************************/ - - -#define HDRB \ -"               0 - 4K   4 - 16K   16 - 64K  64 - 256K  256K - 1M        1M+" - - -/*+F************************************************************************* - * Function: - *   aic7xxx_show_info - * - * Description: - *   Return information to handle /proc support for the driver. - *-F*************************************************************************/ -int -aic7xxx_show_info(struct seq_file *m, struct Scsi_Host *HBAptr) -{ -  struct aic7xxx_host *p; -  struct aic_dev_data *aic_dev; -  struct scsi_device *sdptr; -  unsigned char i; -  unsigned char tindex; - -  for(p=first_aic7xxx; p && p->host != HBAptr; p=p->next) -    ; - -  if (!p) -  { -    seq_printf(m, "Can't find adapter for host number %d\n", HBAptr->host_no); -    return 0; -  } - -  p = (struct aic7xxx_host *) HBAptr->hostdata; - -  seq_printf(m, "Adaptec AIC7xxx driver version: "); -  seq_printf(m, "%s/", AIC7XXX_C_VERSION); -  seq_printf(m, "%s", AIC7XXX_H_VERSION); -  seq_printf(m, "\n"); -  seq_printf(m, "Adapter Configuration:\n"); -  seq_printf(m, "           SCSI Adapter: %s\n", -      board_names[p->board_name_index]); -  if (p->flags & AHC_TWIN) -    seq_printf(m, "                         Twin Channel Controller "); -  else -  { -    char *channel = ""; -    char *ultra = ""; -    char *wide = "Narrow "; -    if (p->flags & AHC_MULTI_CHANNEL) -    { -      channel = " Channel A"; -      if (p->flags & (AHC_CHNLB|AHC_CHNLC)) -        channel = (p->flags & AHC_CHNLB) ? " Channel B" : " Channel C"; -    } -    if (p->features & AHC_WIDE) -      wide = "Wide "; -    if (p->features & AHC_ULTRA3) -    { -      switch(p->chip & AHC_CHIPID_MASK) -      { -        case AHC_AIC7892: -        case AHC_AIC7899: -          ultra = "Ultra-160/m LVD/SE "; -          break; -        default: -          ultra = "Ultra-3 LVD/SE "; -          break; -      } -    } -    else if (p->features & AHC_ULTRA2) -      ultra = "Ultra-2 LVD/SE "; -    else if (p->features & AHC_ULTRA) -      ultra = "Ultra "; -    seq_printf(m, "                           %s%sController%s ", -      ultra, wide, channel); -  } -  switch(p->chip & ~AHC_CHIPID_MASK) -  { -    case AHC_VL: -      seq_printf(m, "at VLB slot %d\n", p->pci_device_fn); -      break; -    case AHC_EISA: -      seq_printf(m, "at EISA slot %d\n", p->pci_device_fn); -      break; -    default: -      seq_printf(m, "at PCI %d/%d/%d\n", p->pci_bus, -        PCI_SLOT(p->pci_device_fn), PCI_FUNC(p->pci_device_fn)); -      break; -  } -  if( !(p->maddr) ) -  { -    seq_printf(m, "    Programmed I/O Base: %lx\n", p->base); -  } -  else -  { -    seq_printf(m, "    PCI MMAPed I/O Base: 0x%lx\n", p->mbase); -  } -  if( (p->chip & (AHC_VL | AHC_EISA)) ) -  { -    seq_printf(m, "    BIOS Memory Address: 0x%08x\n", p->bios_address); -  } -  seq_printf(m, " Adapter SEEPROM Config: %s\n", -          (p->flags & AHC_SEEPROM_FOUND) ? "SEEPROM found and used." : -         ((p->flags & AHC_USEDEFAULTS) ? "SEEPROM not found, using defaults." : -           "SEEPROM not found, using leftover BIOS values.") ); -  seq_printf(m, "      Adaptec SCSI BIOS: %s\n", -          (p->flags & AHC_BIOS_ENABLED) ? 
"Enabled" : "Disabled"); -  seq_printf(m, "                    IRQ: %d\n", HBAptr->irq); -  seq_printf(m, "                   SCBs: Active %d, Max Active %d,\n", -            p->activescbs, p->max_activescbs); -  seq_printf(m, "                         Allocated %d, HW %d, " -            "Page %d\n", p->scb_data->numscbs, p->scb_data->maxhscbs, -            p->scb_data->maxscbs); -  if (p->flags & AHC_EXTERNAL_SRAM) -    seq_printf(m, "                         Using External SCB SRAM\n"); -  seq_printf(m, "             Interrupts: %ld", p->isr_count); -  if (p->chip & AHC_EISA) -  { -    seq_printf(m, " %s\n", -        (p->pause & IRQMS) ? "(Level Sensitive)" : "(Edge Triggered)"); -  } -  else -  { -    seq_printf(m, "\n"); -  } -  seq_printf(m, "      BIOS Control Word: 0x%04x\n", -            p->bios_control); -  seq_printf(m, "   Adapter Control Word: 0x%04x\n", -            p->adapter_control); -  seq_printf(m, "   Extended Translation: %sabled\n", -      (p->flags & AHC_EXTEND_TRANS_A) ? "En" : "Dis"); -  seq_printf(m, "Disconnect Enable Flags: 0x%04x\n", p->discenable); -  if (p->features & (AHC_ULTRA | AHC_ULTRA2)) -  { -    seq_printf(m, "     Ultra Enable Flags: 0x%04x\n", p->ultraenb); -  } -  seq_printf(m, "Default Tag Queue Depth: %d\n", aic7xxx_default_queue_depth); -  seq_printf(m, "    Tagged Queue By Device array for aic7xxx host " -                       "instance %d:\n", p->instance); -  seq_printf(m, "      {"); -  for(i=0; i < (MAX_TARGETS - 1); i++) -    seq_printf(m, "%d,",aic7xxx_tag_info[p->instance].tag_commands[i]); -  seq_printf(m, "%d}\n",aic7xxx_tag_info[p->instance].tag_commands[i]); - -  seq_printf(m, "\n"); -  seq_printf(m, "Statistics:\n\n"); -  list_for_each_entry(aic_dev, &p->aic_devs, list) -  { -    sdptr = aic_dev->SDptr; -    tindex = sdptr->channel << 3 | sdptr->id; -    seq_printf(m, "(scsi%d:%d:%d:%d)\n", -        p->host_no, sdptr->channel, sdptr->id, sdptr->lun); -    seq_printf(m, "  Device using %s/%s", -          (aic_dev->cur.width == MSG_EXT_WDTR_BUS_16_BIT) ? -          "Wide" : "Narrow", -          (aic_dev->cur.offset != 0) ? -          "Sync transfers at " : "Async transfers.\n" ); -    if (aic_dev->cur.offset != 0) -    { -      struct aic7xxx_syncrate *sync_rate; -      unsigned char options = aic_dev->cur.options; -      int period = aic_dev->cur.period; -      int rate = (aic_dev->cur.width == -                  MSG_EXT_WDTR_BUS_16_BIT) ? 
1 : 0; - -      sync_rate = aic7xxx_find_syncrate(p, &period, 0, &options); -      if (sync_rate != NULL) -      { -        seq_printf(m, "%s MByte/sec, offset %d\n", -                        sync_rate->rate[rate], -                        aic_dev->cur.offset ); -      } -      else -      { -        seq_printf(m, "3.3 MByte/sec, offset %d\n", -                        aic_dev->cur.offset ); -      } -    } -    seq_printf(m, "  Transinfo settings: "); -    seq_printf(m, "current(%d/%d/%d/%d), ", -                    aic_dev->cur.period, -                    aic_dev->cur.offset, -                    aic_dev->cur.width, -                    aic_dev->cur.options); -    seq_printf(m, "goal(%d/%d/%d/%d), ", -                    aic_dev->goal.period, -                    aic_dev->goal.offset, -                    aic_dev->goal.width, -                    aic_dev->goal.options); -    seq_printf(m, "user(%d/%d/%d/%d)\n", -                    p->user[tindex].period, -                    p->user[tindex].offset, -                    p->user[tindex].width, -                    p->user[tindex].options); -    if(sdptr->simple_tags) -    { -      seq_printf(m, "  Tagged Command Queueing Enabled, Ordered Tags %s, Depth %d/%d\n", sdptr->ordered_tags ? "Enabled" : "Disabled", sdptr->queue_depth, aic_dev->max_q_depth); -    } -    if(aic_dev->barrier_total) -      seq_printf(m, "  Total transfers %ld:\n    (%ld/%ld/%ld/%ld reads/writes/REQ_BARRIER/Ordered Tags)\n", -        aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total, -        aic_dev->barrier_total, aic_dev->ordered_total); -    else -      seq_printf(m, "  Total transfers %ld:\n    (%ld/%ld reads/writes)\n", -        aic_dev->r_total+aic_dev->w_total, aic_dev->r_total, aic_dev->w_total); -    seq_printf(m, "%s\n", HDRB); -    seq_printf(m, "   Reads:"); -    for (i = 0; i < ARRAY_SIZE(aic_dev->r_bins); i++) -    { -      seq_printf(m, " %10ld", aic_dev->r_bins[i]); -    } -    seq_printf(m, "\n"); -    seq_printf(m, "  Writes:"); -    for (i = 0; i < ARRAY_SIZE(aic_dev->w_bins); i++) -    { -      seq_printf(m, " %10ld", aic_dev->w_bins[i]); -    } -    seq_printf(m, "\n"); -    seq_printf(m, "\n\n"); -  } -  return 0; -} - -/* - * Overrides for Emacs so that we follow Linus's tabbing style. - * Emacs will notice this stuff at the end of the file and automatically - * adjust the settings for this buffer only.  This must remain at the end - * of the file. - * --------------------------------------------------------------------------- - * Local variables: - * c-indent-level: 2 - * c-brace-imaginary-offset: 0 - * c-brace-offset: -2 - * c-argdecl-indent: 2 - * c-label-offset: -2 - * c-continued-statement-offset: 2 - * c-continued-brace-offset: 0 - * indent-tabs-mode: nil - * tab-width: 8 - * End: - */ diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_reg.h b/drivers/scsi/aic7xxx_old/aic7xxx_reg.h deleted file mode 100644 index 27f2334abc7..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx_reg.h +++ /dev/null @@ -1,629 +0,0 @@ -/* -  * DO NOT EDIT - This file is automatically generated. 
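The generated header that follows pairs each register offset with its bit masks; typical use from driver C code is a read-modify-write against the I/O base, for example (aic_inb/aic_outb are hypothetical stand-ins for the driver's accessors, and the constants are the ones defined below):

	#define SCSISEQ		0x00
	#define ENRSELI		0x10
	#define ENAUTOATNP	0x02

	extern unsigned char aic_inb(unsigned long addr);	/* hypothetical */
	extern void aic_outb(unsigned long addr, unsigned char val);

	/* Illustration only: allow reselection via the SCSISEQ masks. */
	static void enable_reselection(unsigned long base)
	{
		unsigned char v = aic_inb(base + SCSISEQ);

		aic_outb(base + SCSISEQ, v | ENRSELI | ENAUTOATNP);
	}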
-  */ - -#define	SCSISEQ         		0x00 -#define		TEMODE          	0x80 -#define		ENSELO          	0x40 -#define		ENSELI          	0x20 -#define		ENRSELI         	0x10 -#define		ENAUTOATNO      	0x08 -#define		ENAUTOATNI      	0x04 -#define		ENAUTOATNP      	0x02 -#define		SCSIRSTO        	0x01 - -#define	SXFRCTL0        		0x01 -#define		DFON            	0x80 -#define		DFPEXP          	0x40 -#define		FAST20          	0x20 -#define		CLRSTCNT        	0x10 -#define		SPIOEN          	0x08 -#define		SCAMEN          	0x04 -#define		CLRCHN          	0x02 - -#define	SXFRCTL1        		0x02 -#define		BITBUCKET       	0x80 -#define		SWRAPEN         	0x40 -#define		ENSPCHK         	0x20 -#define		STIMESEL        	0x18 -#define		ENSTIMER        	0x04 -#define		ACTNEGEN        	0x02 -#define		STPWEN          	0x01 - -#define	SCSISIGO        		0x03 -#define		CDO             	0x80 -#define		IOO             	0x40 -#define		MSGO            	0x20 -#define		ATNO            	0x10 -#define		SELO            	0x08 -#define		BSYO            	0x04 -#define		REQO            	0x02 -#define		ACKO            	0x01 - -#define	SCSISIGI        		0x03 -#define		ATNI            	0x10 -#define		SELI            	0x08 -#define		BSYI            	0x04 -#define		REQI            	0x02 -#define		ACKI            	0x01 - -#define	SCSIRATE        		0x04 -#define		WIDEXFER        	0x80 -#define		SXFR_ULTRA2     	0x7f -#define		SXFR            	0x70 -#define		SOFS            	0x0f - -#define	SCSIID          		0x05 -#define	SCSIOFFSET      		0x05 -#define		SOFS_ULTRA2     	0x7f - -#define	SCSIDATL        		0x06 - -#define	SCSIDATH        		0x07 - -#define	STCNT           		0x08 - -#define	OPTIONMODE      		0x08 -#define		AUTORATEEN      	0x80 -#define		AUTOACKEN       	0x40 -#define		ATNMGMNTEN      	0x20 -#define		BUSFREEREV      	0x10 -#define		EXPPHASEDIS     	0x08 -#define		SCSIDATL_IMGEN  	0x04 -#define		AUTO_MSGOUT_DE  	0x02 -#define		DIS_MSGIN_DUALEDGE	0x01 - -#define	CLRSINT0        		0x0b -#define		CLRSELDO        	0x40 -#define		CLRSELDI        	0x20 -#define		CLRSELINGO      	0x10 -#define		CLRSWRAP        	0x08 -#define		CLRSPIORDY      	0x02 - -#define	SSTAT0          		0x0b -#define		TARGET          	0x80 -#define		SELDO           	0x40 -#define		SELDI           	0x20 -#define		SELINGO         	0x10 -#define		IOERR           	0x08 -#define		SWRAP           	0x08 -#define		SDONE           	0x04 -#define		SPIORDY         	0x02 -#define		DMADONE         	0x01 - -#define	CLRSINT1        		0x0c -#define		CLRSELTIMEO     	0x80 -#define		CLRATNO         	0x40 -#define		CLRSCSIRSTI     	0x20 -#define		CLRBUSFREE      	0x08 -#define		CLRSCSIPERR     	0x04 -#define		CLRPHASECHG     	0x02 -#define		CLRREQINIT      	0x01 - -#define	SSTAT1          		0x0c -#define		SELTO           	0x80 -#define		ATNTARG         	0x40 -#define		SCSIRSTI        	0x20 -#define		PHASEMIS        	0x10 -#define		BUSFREE         	0x08 -#define		SCSIPERR        	0x04 -#define		PHASECHG        	0x02 -#define		REQINIT         	0x01 - -#define	SSTAT2          		0x0d -#define		OVERRUN         	0x80 -#define		SHVALID         	0x40 -#define		WIDE_RES        	0x20 -#define		SFCNT           	0x1f -#define		EXP_ACTIVE      	0x10 -#define		CRCVALERR       	0x08 -#define		CRCENDERR       	0x04 -#define		CRCREQERR       	0x02 -#define		DUAL_EDGE_ERROR 	0x01 - -#define	SSTAT3          		0x0e -#define		SCSICNT         	0xf0 -#define		OFFCNT          	0x0f - -#define	SCSIID_ULTRA2   		0x0f -#define		OID             	0x0f - -#define	SIMODE0         		0x10 -#define		ENSELDO    
     	0x40 -#define		ENSELDI         	0x20 -#define		ENSELINGO       	0x10 -#define		ENIOERR         	0x08 -#define		ENSWRAP         	0x08 -#define		ENSDONE         	0x04 -#define		ENSPIORDY       	0x02 -#define		ENDMADONE       	0x01 - -#define	SIMODE1         		0x11 -#define		ENSELTIMO       	0x80 -#define		ENATNTARG       	0x40 -#define		ENSCSIRST       	0x20 -#define		ENPHASEMIS      	0x10 -#define		ENBUSFREE       	0x08 -#define		ENSCSIPERR      	0x04 -#define		ENPHASECHG      	0x02 -#define		ENREQINIT       	0x01 - -#define	SCSIBUSL        		0x12 - -#define	SCSIBUSH        		0x13 - -#define	SHADDR          		0x14 - -#define	SELTIMER        		0x18 -#define		STAGE6          	0x20 -#define		STAGE5          	0x10 -#define		STAGE4          	0x08 -#define		STAGE3          	0x04 -#define		STAGE2          	0x02 -#define		STAGE1          	0x01 - -#define	SELID           		0x19 -#define		SELID_MASK      	0xf0 -#define		ONEBIT          	0x08 - -#define	SPIOCAP         		0x1b -#define		SOFT1           	0x80 -#define		SOFT0           	0x40 -#define		SOFTCMDEN       	0x20 -#define		HAS_BRDCTL      	0x10 -#define		SEEPROM         	0x08 -#define		EEPROM          	0x04 -#define		ROM             	0x02 -#define		SSPIOCPS        	0x01 - -#define	BRDCTL          		0x1d -#define		BRDDAT7         	0x80 -#define		BRDDAT6         	0x40 -#define		BRDDAT5         	0x20 -#define		BRDDAT4         	0x10 -#define		BRDSTB          	0x10 -#define		BRDCS           	0x08 -#define		BRDDAT3         	0x08 -#define		BRDDAT2         	0x04 -#define		BRDRW           	0x04 -#define		BRDRW_ULTRA2    	0x02 -#define		BRDCTL1         	0x02 -#define		BRDSTB_ULTRA2   	0x01 -#define		BRDCTL0         	0x01 - -#define	SEECTL          		0x1e -#define		EXTARBACK       	0x80 -#define		EXTARBREQ       	0x40 -#define		SEEMS           	0x20 -#define		SEERDY          	0x10 -#define		SEECS           	0x08 -#define		SEECK           	0x04 -#define		SEEDO           	0x02 -#define		SEEDI           	0x01 - -#define	SBLKCTL         		0x1f -#define		DIAGLEDEN       	0x80 -#define		DIAGLEDON       	0x40 -#define		AUTOFLUSHDIS    	0x20 -#define		ENAB40          	0x08 -#define		ENAB20          	0x04 -#define		SELWIDE         	0x02 -#define		XCVR            	0x01 - -#define	SRAM_BASE       		0x20 - -#define	TARG_SCSIRATE   		0x20 - -#define	ULTRA_ENB       		0x30 - -#define	DISC_DSB        		0x32 - -#define	MSG_OUT         		0x34 - -#define	DMAPARAMS       		0x35 -#define		PRELOADEN       	0x80 -#define		WIDEODD         	0x40 -#define		SCSIEN          	0x20 -#define		SDMAENACK       	0x10 -#define		SDMAEN          	0x10 -#define		HDMAEN          	0x08 -#define		HDMAENACK       	0x08 -#define		DIRECTION       	0x04 -#define		FIFOFLUSH       	0x02 -#define		FIFORESET       	0x01 - -#define	SEQ_FLAGS       		0x36 -#define		IDENTIFY_SEEN   	0x80 -#define		SCBPTR_VALID    	0x20 -#define		DPHASE          	0x10 -#define		AMTARGET        	0x08 -#define		WIDE_BUS        	0x02 -#define		TWIN_BUS        	0x01 - -#define	SAVED_TCL       		0x37 - -#define	SG_COUNT        		0x38 - -#define	SG_NEXT         		0x39 - -#define	LASTPHASE       		0x3d -#define		P_MESGIN        	0xe0 -#define		PHASE_MASK      	0xe0 -#define		P_STATUS        	0xc0 -#define		P_MESGOUT       	0xa0 -#define		P_COMMAND       	0x80 -#define		CDI             	0x80 -#define		IOI             	0x40 -#define		P_DATAIN        	0x40 -#define		MSGI            	0x20 -#define		P_BUSFREE       	0x01 -#define		P_DATAOUT       	0x00 - -#define	WAITING_SCBH    		0x3e - -#define	DISCONNECTED_SCBH		0x3f - 
-#define	FREE_SCBH       		0x40 - -#define	HSCB_ADDR       		0x41 - -#define	SCBID_ADDR      		0x45 - -#define	TMODE_CMDADDR   		0x49 - -#define	KERNEL_QINPOS   		0x4d - -#define	QINPOS          		0x4e - -#define	QOUTPOS         		0x4f - -#define	TMODE_CMDADDR_NEXT		0x50 - -#define	ARG_1           		0x51 -#define	RETURN_1        		0x51 -#define		SEND_MSG        	0x80 -#define		SEND_SENSE      	0x40 -#define		SEND_REJ        	0x20 -#define		MSGOUT_PHASEMIS 	0x10 - -#define	ARG_2           		0x52 -#define	RETURN_2        		0x52 - -#define	LAST_MSG        		0x53 - -#define	PREFETCH_CNT    		0x54 - -#define	SCSICONF        		0x5a -#define		TERM_ENB        	0x80 -#define		RESET_SCSI      	0x40 -#define		HWSCSIID        	0x0f -#define		HSCSIID         	0x07 - -#define	HOSTCONF        		0x5d - -#define	HA_274_BIOSCTRL 		0x5f -#define		BIOSMODE        	0x30 -#define		BIOSDISABLED    	0x30 -#define		CHANNEL_B_PRIMARY	0x08 - -#define	SEQCTL          		0x60 -#define		PERRORDIS       	0x80 -#define		PAUSEDIS        	0x40 -#define		FAILDIS         	0x20 -#define		FASTMODE        	0x10 -#define		BRKADRINTEN     	0x08 -#define		STEP            	0x04 -#define		SEQRESET        	0x02 -#define		LOADRAM         	0x01 - -#define	SEQRAM          		0x61 - -#define	SEQADDR0        		0x62 - -#define	SEQADDR1        		0x63 -#define		SEQADDR1_MASK   	0x01 - -#define	ACCUM           		0x64 - -#define	SINDEX          		0x65 - -#define	DINDEX          		0x66 - -#define	ALLONES         		0x69 - -#define	ALLZEROS        		0x6a - -#define	NONE            		0x6a - -#define	FLAGS           		0x6b -#define		ZERO            	0x02 -#define		CARRY           	0x01 - -#define	SINDIR          		0x6c - -#define	DINDIR          		0x6d - -#define	FUNCTION1       		0x6e - -#define	STACK           		0x6f - -#define	TARG_OFFSET     		0x70 - -#define	BCTL            		0x84 -#define		ACE             	0x08 -#define		ENABLE          	0x01 - -#define	DSCOMMAND0      		0x84 -#define		INTSCBRAMSEL    	0x08 -#define		RAMPS           	0x04 -#define		USCBSIZE32      	0x02 -#define		CIOPARCKEN      	0x01 - -#define	DSCOMMAND       		0x84 -#define		CACHETHEN       	0x80 -#define		DPARCKEN        	0x40 -#define		MPARCKEN        	0x20 -#define		EXTREQLCK       	0x10 - -#define	BUSTIME         		0x85 -#define		BOFF            	0xf0 -#define		BON             	0x0f - -#define	BUSSPD          		0x86 -#define		DFTHRSH         	0xc0 -#define		STBOFF          	0x38 -#define		STBON           	0x07 - -#define	DSPCISTATUS     		0x86 -#define		DFTHRSH_100     	0xc0 - -#define	HCNTRL          		0x87 -#define		POWRDN          	0x40 -#define		SWINT           	0x10 -#define		IRQMS           	0x08 -#define		PAUSE           	0x04 -#define		INTEN           	0x02 -#define		CHIPRST         	0x01 -#define		CHIPRSTACK      	0x01 - -#define	HADDR           		0x88 - -#define	HCNT            		0x8c - -#define	SCBPTR          		0x90 - -#define	INTSTAT         		0x91 -#define		SEQINT_MASK     	0xf1 -#define		DATA_OVERRUN    	0xe1 -#define		MSGIN_PHASEMIS  	0xd1 -#define		TRACEPOINT2     	0xc1 -#define		SEQ_SG_FIXUP    	0xb1 -#define		AWAITING_MSG    	0xa1 -#define		RESIDUAL        	0x81 -#define		BAD_STATUS      	0x71 -#define		REJECT_MSG      	0x61 -#define		WIDE_RESIDUE    	0x51 -#define		EXTENDED_MSG    	0x41 -#define		NO_MATCH        	0x31 -#define		NO_IDENT        	0x21 -#define		SEND_REJECT     	0x11 -#define		INT_PEND        	0x0f -#define		BRKADRINT       	0x08 -#define		SCSIINT         	0x04 -#define		CMDCMPLT        	0x02 -#define		BAD_PHASE       	0x01 -#define		
SEQINT          	0x01 - -#define	CLRINT          		0x92 -#define		CLRPARERR       	0x10 -#define		CLRBRKADRINT    	0x08 -#define		CLRSCSIINT      	0x04 -#define		CLRCMDINT       	0x02 -#define		CLRSEQINT       	0x01 - -#define	ERROR           		0x92 -#define		CIOPARERR       	0x80 -#define		PCIERRSTAT      	0x40 -#define		MPARERR         	0x20 -#define		DPARERR         	0x10 -#define		SQPARERR        	0x08 -#define		ILLOPCODE       	0x04 -#define		DSCTMOUT        	0x02 -#define		ILLSADDR        	0x02 -#define		ILLHADDR        	0x01 - -#define	DFCNTRL         		0x93 - -#define	DFSTATUS        		0x94 -#define		PRELOAD_AVAIL   	0x80 -#define		DWORDEMP        	0x20 -#define		MREQPEND        	0x10 -#define		HDONE           	0x08 -#define		DFTHRESH        	0x04 -#define		FIFOFULL        	0x02 -#define		FIFOEMP         	0x01 - -#define	DFDAT           		0x99 - -#define	SCBCNT          		0x9a -#define		SCBAUTO         	0x80 -#define		SCBCNT_MASK     	0x1f - -#define	QINFIFO         		0x9b - -#define	QINCNT          		0x9c - -#define	SCSIDATL_IMG    		0x9c - -#define	QOUTFIFO        		0x9d - -#define	CRCCONTROL1     		0x9d -#define		CRCONSEEN       	0x80 -#define		CRCVALCHKEN     	0x40 -#define		CRCENDCHKEN     	0x20 -#define		CRCREQCHKEN     	0x10 -#define		TARGCRCENDEN    	0x08 -#define		TARGCRCCNTEN    	0x04 - -#define	SCSIPHASE       		0x9e -#define		SP_STATUS       	0x20 -#define		SP_COMMAND      	0x10 -#define		SP_MSG_IN       	0x08 -#define		SP_MSG_OUT      	0x04 -#define		SP_DATA_IN      	0x02 -#define		SP_DATA_OUT     	0x01 - -#define	QOUTCNT         		0x9e - -#define	SFUNCT          		0x9f -#define		ALT_MODE        	0x80 - -#define	SCB_CONTROL     		0xa0 -#define		MK_MESSAGE      	0x80 -#define		DISCENB         	0x40 -#define		TAG_ENB         	0x20 -#define		DISCONNECTED    	0x04 -#define		SCB_TAG_TYPE    	0x03 - -#define	SCB_BASE        		0xa0 - -#define	SCB_TCL         		0xa1 -#define		TID             	0xf0 -#define		SELBUSB         	0x08 -#define		LID             	0x07 - -#define	SCB_TARGET_STATUS		0xa2 - -#define	SCB_SGCOUNT     		0xa3 - -#define	SCB_SGPTR       		0xa4 - -#define	SCB_RESID_SGCNT 		0xa8 - -#define	SCB_RESID_DCNT  		0xa9 - -#define	SCB_DATAPTR     		0xac - -#define	SCB_DATACNT     		0xb0 - -#define	SCB_CMDPTR      		0xb4 - -#define	SCB_CMDLEN      		0xb8 - -#define	SCB_TAG         		0xb9 - -#define	SCB_NEXT        		0xba - -#define	SCB_PREV        		0xbb - -#define	SCB_BUSYTARGETS 		0xbc - -#define	SEECTL_2840     		0xc0 -#define		CS_2840         	0x04 -#define		CK_2840         	0x02 -#define		DO_2840         	0x01 - -#define	STATUS_2840     		0xc1 -#define		EEPROM_TF       	0x80 -#define		BIOS_SEL        	0x60 -#define		ADSEL           	0x1e -#define		DI_2840         	0x01 - -#define	CCHADDR         		0xe0 - -#define	CCHCNT          		0xe8 - -#define	CCSGRAM         		0xe9 - -#define	CCSGADDR        		0xea - -#define	CCSGCTL         		0xeb -#define		CCSGDONE        	0x80 -#define		CCSGEN          	0x08 -#define		FLAG            	0x02 -#define		CCSGRESET       	0x01 - -#define	CCSCBRAM        		0xec - -#define	CCSCBADDR       		0xed - -#define	CCSCBCTL        		0xee -#define		CCSCBDONE       	0x80 -#define		ARRDONE         	0x40 -#define		CCARREN         	0x10 -#define		CCSCBEN         	0x08 -#define		CCSCBDIR        	0x04 -#define		CCSCBRESET      	0x01 - -#define	CCSCBCNT        		0xef - -#define	CCSCBPTR        		0xf1 - -#define	HNSCB_QOFF      		0xf4 - -#define	HESCB_QOFF      		0xf5 - -#define	SNSCB_QOFF      		0xf6 - -#define	SESCB_QOFF      		0xf7 - -#define	
SDSCB_QOFF      		0xf8 - -#define	QOFF_CTLSTA     		0xfa -#define		ESTABLISH_SCB_AVAIL	0x80 -#define		SCB_AVAIL       	0x40 -#define		SNSCB_ROLLOVER  	0x20 -#define		SDSCB_ROLLOVER  	0x10 -#define		SESCB_ROLLOVER  	0x08 -#define		SCB_QSIZE       	0x07 -#define		SCB_QSIZE_256   	0x06 - -#define	DFF_THRSH       		0xfb -#define		WR_DFTHRSH      	0x70 -#define		WR_DFTHRSH_MAX  	0x70 -#define		WR_DFTHRSH_90   	0x60 -#define		WR_DFTHRSH_85   	0x50 -#define		WR_DFTHRSH_75   	0x40 -#define		WR_DFTHRSH_63   	0x30 -#define		WR_DFTHRSH_50   	0x20 -#define		WR_DFTHRSH_25   	0x10 -#define		RD_DFTHRSH_MAX  	0x07 -#define		RD_DFTHRSH      	0x07 -#define		RD_DFTHRSH_90   	0x06 -#define		RD_DFTHRSH_85   	0x05 -#define		RD_DFTHRSH_75   	0x04 -#define		RD_DFTHRSH_63   	0x03 -#define		RD_DFTHRSH_50   	0x02 -#define		RD_DFTHRSH_25   	0x01 -#define		WR_DFTHRSH_MIN  	0x00 -#define		RD_DFTHRSH_MIN  	0x00 - -#define	SG_CACHEPTR     		0xfc -#define		SG_USER_DATA    	0xfc -#define		LAST_SEG        	0x02 -#define		LAST_SEG_DONE   	0x01 - - -#define	CMD_GROUP2_BYTE_DELTA	0xfa -#define	MAX_OFFSET_8BIT	0x0f -#define	BUS_16_BIT	0x01 -#define	QINFIFO_OFFSET	0x02 -#define	CMD_GROUP5_BYTE_DELTA	0x0b -#define	CMD_GROUP_CODE_SHIFT	0x05 -#define	MAX_OFFSET_ULTRA2	0x7f -#define	MAX_OFFSET_16BIT	0x08 -#define	BUS_8_BIT	0x00 -#define	QOUTFIFO_OFFSET	0x01 -#define	UNTAGGEDSCB_OFFSET	0x00 -#define	CCSGRAM_MAXSEGS	0x10 -#define	SCB_LIST_NULL	0xff -#define	SG_SIZEOF	0x08 -#define	CMD_GROUP4_BYTE_DELTA	0x04 -#define	CMD_GROUP0_BYTE_DELTA	0xfc -#define	HOST_MSG	0xff -#define	BUS_32_BIT	0x02 -#define	CCSGADDR_MAX	0x80 - - -/* Downloaded Constant Definitions */ -#define	TMODE_NUMCMDS	0x00 diff --git a/drivers/scsi/aic7xxx_old/aic7xxx_seq.c b/drivers/scsi/aic7xxx_old/aic7xxx_seq.c deleted file mode 100644 index e1bc140e973..00000000000 --- a/drivers/scsi/aic7xxx_old/aic7xxx_seq.c +++ /dev/null @@ -1,817 +0,0 @@ -/* -  * DO NOT EDIT - This file is automatically generated. 
-  */ -static unsigned char seqprog[] = { -	0xff, 0x6a, 0x06, 0x08, -	0x7f, 0x02, 0x04, 0x08, -	0x12, 0x6a, 0x00, 0x00, -	0xff, 0x6a, 0xd6, 0x09, -	0xff, 0x6a, 0xdc, 0x09, -	0x00, 0x65, 0xca, 0x58, -	0xf7, 0x01, 0x02, 0x08, -	0xff, 0x4e, 0xc8, 0x08, -	0xbf, 0x60, 0xc0, 0x08, -	0x60, 0x0b, 0x86, 0x68, -	0x40, 0x00, 0x0c, 0x68, -	0x08, 0x1f, 0x3e, 0x10, -	0x60, 0x0b, 0x86, 0x68, -	0x40, 0x00, 0x0c, 0x68, -	0x08, 0x1f, 0x3e, 0x10, -	0xff, 0x3e, 0x48, 0x60, -	0x40, 0xfa, 0x10, 0x78, -	0xff, 0xf6, 0xd4, 0x08, -	0x01, 0x4e, 0x9c, 0x18, -	0x40, 0x60, 0xc0, 0x00, -	0x00, 0x4d, 0x10, 0x70, -	0x01, 0x4e, 0x9c, 0x18, -	0xbf, 0x60, 0xc0, 0x08, -	0x00, 0x6a, 0x86, 0x5c, -	0xff, 0x4e, 0xc8, 0x18, -	0x02, 0x6a, 0x70, 0x5b, -	0xff, 0x52, 0x20, 0x09, -	0x0d, 0x6a, 0x6a, 0x00, -	0x00, 0x52, 0xe6, 0x5b, -	0x03, 0xb0, 0x52, 0x31, -	0xff, 0xb0, 0x52, 0x09, -	0xff, 0xb1, 0x54, 0x09, -	0xff, 0xb2, 0x56, 0x09, -	0xff, 0xa3, 0x50, 0x09, -	0xff, 0x3e, 0x74, 0x09, -	0xff, 0x90, 0x7c, 0x08, -	0xff, 0x3e, 0x20, 0x09, -	0x00, 0x65, 0x4e, 0x58, -	0x00, 0x65, 0x0c, 0x40, -	0xf7, 0x1f, 0xca, 0x08, -	0x08, 0xa1, 0xc8, 0x08, -	0x00, 0x65, 0xca, 0x00, -	0xff, 0x65, 0x3e, 0x08, -	0xf0, 0xa1, 0xc8, 0x08, -	0x0f, 0x0f, 0x1e, 0x08, -	0x00, 0x0f, 0x1e, 0x00, -	0xf0, 0xa1, 0xc8, 0x08, -	0x0f, 0x05, 0x0a, 0x08, -	0x00, 0x05, 0x0a, 0x00, -	0xff, 0x6a, 0x0c, 0x08, -	0x5a, 0x6a, 0x00, 0x04, -	0x12, 0x65, 0x02, 0x00, -	0x31, 0x6a, 0xca, 0x00, -	0x80, 0x37, 0x6e, 0x68, -	0xff, 0x65, 0xca, 0x18, -	0xff, 0x37, 0xdc, 0x08, -	0xff, 0x6e, 0xc8, 0x08, -	0x00, 0x6c, 0x76, 0x78, -	0x20, 0x01, 0x02, 0x00, -	0x4c, 0x37, 0xc8, 0x28, -	0x08, 0x1f, 0x7e, 0x78, -	0x08, 0x37, 0x6e, 0x00, -	0x08, 0x64, 0xc8, 0x00, -	0x70, 0x64, 0xca, 0x18, -	0xff, 0x6c, 0x0a, 0x08, -	0x20, 0x64, 0xca, 0x18, -	0xff, 0x6c, 0x08, 0x0c, -	0x40, 0x0b, 0x96, 0x68, -	0x20, 0x6a, 0x16, 0x00, -	0xf0, 0x19, 0x6e, 0x08, -	0x08, 0x6a, 0x18, 0x00, -	0x08, 0x11, 0x22, 0x00, -	0x08, 0x6a, 0x66, 0x58, -	0x08, 0x6a, 0x68, 0x00, -	0x00, 0x65, 0xaa, 0x40, -	0x12, 0x6a, 0x00, 0x00, -	0x40, 0x6a, 0x16, 0x00, -	0xff, 0x3e, 0x20, 0x09, -	0xff, 0xba, 0x7c, 0x08, -	0xff, 0xa1, 0x6e, 0x08, -	0x08, 0x6a, 0x18, 0x00, -	0x08, 0x11, 0x22, 0x00, -	0x08, 0x6a, 0x66, 0x58, -	0x80, 0x6a, 0x68, 0x00, -	0x80, 0x36, 0x6c, 0x00, -	0x00, 0x65, 0xba, 0x5b, -	0xff, 0x3d, 0xc8, 0x08, -	0xbf, 0x64, 0xe2, 0x78, -	0x80, 0x64, 0xc8, 0x71, -	0xa0, 0x64, 0xf8, 0x71, -	0xc0, 0x64, 0xf0, 0x71, -	0xe0, 0x64, 0x38, 0x72, -	0x01, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0xaa, 0x40, -	0xf7, 0x11, 0x22, 0x08, -	0x00, 0x65, 0xca, 0x58, -	0xff, 0x06, 0xd4, 0x08, -	0xf7, 0x01, 0x02, 0x08, -	0x09, 0x0c, 0xc4, 0x78, -	0x08, 0x0c, 0x0c, 0x68, -	0x01, 0x6a, 0x22, 0x01, -	0xff, 0x6a, 0x26, 0x09, -	0x02, 0x6a, 0x08, 0x30, -	0xff, 0x6a, 0x08, 0x08, -	0xdf, 0x01, 0x02, 0x08, -	0x01, 0x6a, 0x7a, 0x00, -	0xff, 0x6a, 0x6c, 0x0c, -	0x04, 0x14, 0x10, 0x31, -	0x03, 0xa9, 0x18, 0x31, -	0x03, 0xa9, 0x10, 0x30, -	0x08, 0x6a, 0xcc, 0x00, -	0xa9, 0x6a, 0xd0, 0x5b, -	0x00, 0x65, 0x02, 0x41, -	0xa8, 0x6a, 0x6a, 0x00, -	0x79, 0x6a, 0x6a, 0x00, -	0x40, 0x3d, 0xea, 0x68, -	0x04, 0x35, 0x6a, 0x00, -	0x00, 0x65, 0x2a, 0x5b, -	0x80, 0x6a, 0xd4, 0x01, -	0x10, 0x36, 0xd6, 0x68, -	0x10, 0x36, 0x6c, 0x00, -	0x07, 0xac, 0x10, 0x31, -	0x05, 0xa3, 0x70, 0x30, -	0x03, 0x8c, 0x10, 0x30, -	0x88, 0x6a, 0xcc, 0x00, -	0xac, 0x6a, 0xc8, 0x5b, -	0x00, 0x65, 0xc2, 0x5b, -	0x38, 0x6a, 0xcc, 0x00, -	0xa3, 0x6a, 0xcc, 0x5b, -	0xff, 0x38, 0x12, 0x69, -	0x80, 0x02, 0x04, 0x00, -	0xe7, 0x35, 0x6a, 0x08, -	0x03, 0x69, 0x18, 0x31, -	0x03, 0x69, 0x10, 0x30, -	0xff, 0x6a, 0x10, 0x00, -	
0xff, 0x6a, 0x12, 0x00, -	0xff, 0x6a, 0x14, 0x00, -	0x22, 0x38, 0xc8, 0x28, -	0x01, 0x38, 0x1c, 0x61, -	0x02, 0x64, 0xc8, 0x00, -	0x01, 0x38, 0x1c, 0x61, -	0xbf, 0x35, 0x6a, 0x08, -	0xff, 0x64, 0xf8, 0x09, -	0xff, 0x35, 0x26, 0x09, -	0x80, 0x02, 0xa4, 0x69, -	0x10, 0x0c, 0x7a, 0x69, -	0x80, 0x94, 0x22, 0x79, -	0x00, 0x35, 0x0a, 0x5b, -	0x80, 0x02, 0xa4, 0x69, -	0xff, 0x65, 0x94, 0x79, -	0x01, 0x38, 0x70, 0x71, -	0xff, 0x38, 0x70, 0x18, -	0xff, 0x38, 0x94, 0x79, -	0x80, 0xea, 0x4a, 0x61, -	0xef, 0x38, 0xc8, 0x18, -	0x80, 0x6a, 0xc8, 0x00, -	0x00, 0x65, 0x3c, 0x49, -	0x33, 0x38, 0xc8, 0x28, -	0xff, 0x64, 0xd0, 0x09, -	0x04, 0x39, 0xc0, 0x31, -	0x09, 0x6a, 0xd6, 0x01, -	0x80, 0xeb, 0x42, 0x79, -	0xf7, 0xeb, 0xd6, 0x09, -	0x08, 0xeb, 0x46, 0x69, -	0x01, 0x6a, 0xd6, 0x01, -	0x08, 0xe9, 0x10, 0x31, -	0x03, 0x8c, 0x10, 0x30, -	0xff, 0x38, 0x70, 0x18, -	0x88, 0x6a, 0xcc, 0x00, -	0x39, 0x6a, 0xce, 0x5b, -	0x08, 0x6a, 0x18, 0x01, -	0xff, 0x6a, 0x1a, 0x09, -	0xff, 0x6a, 0x1c, 0x09, -	0x0d, 0x93, 0x26, 0x01, -	0x00, 0x65, 0x78, 0x5c, -	0x88, 0x6a, 0xcc, 0x00, -	0x00, 0x65, 0x6a, 0x5c, -	0x00, 0x65, 0xc2, 0x5b, -	0xff, 0x6a, 0xc8, 0x08, -	0x08, 0x39, 0x72, 0x18, -	0x00, 0x3a, 0x74, 0x20, -	0x00, 0x65, 0x02, 0x41, -	0x01, 0x0c, 0x6c, 0x79, -	0x10, 0x0c, 0x02, 0x79, -	0x10, 0x0c, 0x7a, 0x69, -	0x01, 0xfc, 0x70, 0x79, -	0xff, 0x6a, 0x70, 0x08, -	0x01, 0x0c, 0x76, 0x79, -	0x10, 0x0c, 0x02, 0x79, -	0x00, 0x65, 0xae, 0x59, -	0x01, 0xfc, 0x94, 0x69, -	0x40, 0x0d, 0x84, 0x69, -	0xb1, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0x94, 0x41, -	0x2e, 0xfc, 0xa2, 0x28, -	0x3f, 0x38, 0xc8, 0x08, -	0x00, 0x51, 0x94, 0x71, -	0xff, 0x6a, 0xc8, 0x08, -	0xf8, 0x39, 0x72, 0x18, -	0xff, 0x3a, 0x74, 0x20, -	0x01, 0x38, 0x70, 0x18, -	0x00, 0x65, 0x86, 0x41, -	0x03, 0x08, 0x52, 0x31, -	0xff, 0x38, 0x50, 0x09, -	0x12, 0x01, 0x02, 0x00, -	0xff, 0x08, 0x52, 0x09, -	0xff, 0x09, 0x54, 0x09, -	0xff, 0x0a, 0x56, 0x09, -	0xff, 0x38, 0x50, 0x09, -	0x00, 0x65, 0xaa, 0x40, -	0x10, 0x0c, 0xa4, 0x79, -	0x00, 0x65, 0xae, 0x59, -	0x7f, 0x02, 0x04, 0x08, -	0xe1, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0xaa, 0x40, -	0x04, 0x93, 0xc2, 0x69, -	0xdf, 0x93, 0x26, 0x09, -	0x20, 0x93, 0xb2, 0x69, -	0x02, 0x93, 0x26, 0x01, -	0x01, 0x94, 0xb6, 0x79, -	0x01, 0x94, 0xb6, 0x79, -	0x01, 0x94, 0xb6, 0x79, -	0x01, 0x94, 0xb6, 0x79, -	0x01, 0x94, 0xb6, 0x79, -	0x10, 0x94, 0xc0, 0x69, -	0xd7, 0x93, 0x26, 0x09, -	0x28, 0x93, 0xc4, 0x69, -	0xff, 0x6a, 0xd4, 0x0c, -	0x00, 0x65, 0x2a, 0x5b, -	0x05, 0xb4, 0x10, 0x31, -	0x02, 0x6a, 0x1a, 0x31, -	0x03, 0x8c, 0x10, 0x30, -	0x88, 0x6a, 0xcc, 0x00, -	0xb4, 0x6a, 0xcc, 0x5b, -	0xff, 0x6a, 0x1a, 0x09, -	0xff, 0x6a, 0x1c, 0x09, -	0x00, 0x65, 0xc2, 0x5b, -	0x3d, 0x6a, 0x0a, 0x5b, -	0xac, 0x6a, 0x26, 0x01, -	0x04, 0x0b, 0xde, 0x69, -	0x04, 0x0b, 0xe4, 0x69, -	0x10, 0x0c, 0xe0, 0x79, -	0x02, 0x03, 0xe8, 0x79, -	0x11, 0x0c, 0xe4, 0x79, -	0xd7, 0x93, 0x26, 0x09, -	0x28, 0x93, 0xea, 0x69, -	0x12, 0x01, 0x02, 0x00, -	0x00, 0x65, 0xaa, 0x40, -	0x00, 0x65, 0x2a, 0x5b, -	0xff, 0x06, 0x44, 0x09, -	0x00, 0x65, 0xaa, 0x40, -	0x10, 0x3d, 0x06, 0x00, -	0xff, 0x34, 0xca, 0x08, -	0x80, 0x65, 0x1c, 0x62, -	0x0f, 0xa1, 0xca, 0x08, -	0x07, 0xa1, 0xca, 0x08, -	0x40, 0xa0, 0xc8, 0x08, -	0x00, 0x65, 0xca, 0x00, -	0x80, 0x65, 0xca, 0x00, -	0x80, 0xa0, 0x0c, 0x7a, -	0xff, 0x65, 0x0c, 0x08, -	0x00, 0x65, 0x1e, 0x42, -	0x20, 0xa0, 0x24, 0x7a, -	0xff, 0x65, 0x0c, 0x08, -	0x00, 0x65, 0xba, 0x5b, -	0xa0, 0x3d, 0x2c, 0x62, -	0x23, 0xa0, 0x0c, 0x08, -	0x00, 0x65, 0xba, 0x5b, -	0xa0, 0x3d, 0x2c, 0x62, -	0x00, 0xb9, 0x24, 0x42, -	0xff, 0x65, 0x24, 0x62, -	0xa1, 0x6a, 0x22, 
0x01, -	0xff, 0x6a, 0xd4, 0x08, -	0x10, 0x51, 0x2c, 0x72, -	0x40, 0x6a, 0x18, 0x00, -	0xff, 0x65, 0x0c, 0x08, -	0x00, 0x65, 0xba, 0x5b, -	0xa0, 0x3d, 0xf6, 0x71, -	0x40, 0x6a, 0x18, 0x00, -	0xff, 0x34, 0xa6, 0x08, -	0x80, 0x34, 0x34, 0x62, -	0x7f, 0xa0, 0x40, 0x09, -	0x08, 0x6a, 0x68, 0x00, -	0x00, 0x65, 0xaa, 0x40, -	0x64, 0x6a, 0x00, 0x5b, -	0x80, 0x64, 0xaa, 0x6a, -	0x04, 0x64, 0x8c, 0x72, -	0x02, 0x64, 0x92, 0x72, -	0x00, 0x6a, 0x54, 0x72, -	0x03, 0x64, 0xa6, 0x72, -	0x01, 0x64, 0x88, 0x72, -	0x07, 0x64, 0xe8, 0x72, -	0x08, 0x64, 0x50, 0x72, -	0x23, 0x64, 0xec, 0x72, -	0x11, 0x6a, 0x22, 0x01, -	0x07, 0x6a, 0xf2, 0x5a, -	0xff, 0x06, 0xd4, 0x08, -	0x00, 0x65, 0xaa, 0x40, -	0xff, 0xa8, 0x58, 0x6a, -	0xff, 0xa2, 0x70, 0x7a, -	0x01, 0x6a, 0x6a, 0x00, -	0x00, 0xb9, 0xe6, 0x5b, -	0xff, 0xa2, 0x70, 0x7a, -	0x71, 0x6a, 0x22, 0x01, -	0xff, 0x6a, 0xd4, 0x08, -	0x40, 0x51, 0x70, 0x62, -	0x0d, 0x6a, 0x6a, 0x00, -	0x00, 0xb9, 0xe6, 0x5b, -	0xff, 0x3e, 0x74, 0x09, -	0xff, 0x90, 0x7c, 0x08, -	0x00, 0x65, 0x4e, 0x58, -	0x00, 0x65, 0xbc, 0x40, -	0x20, 0xa0, 0x78, 0x6a, -	0xff, 0x37, 0xc8, 0x08, -	0x00, 0x6a, 0x90, 0x5b, -	0xff, 0x6a, 0xa6, 0x5b, -	0xff, 0xf8, 0xc8, 0x08, -	0xff, 0x4f, 0xc8, 0x08, -	0x01, 0x6a, 0x90, 0x5b, -	0x00, 0xb9, 0xa6, 0x5b, -	0x01, 0x4f, 0x9e, 0x18, -	0x02, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0x80, 0x5c, -	0x00, 0x65, 0xbc, 0x40, -	0x41, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0xaa, 0x40, -	0x04, 0xa0, 0x40, 0x01, -	0x00, 0x65, 0x98, 0x5c, -	0x00, 0x65, 0xbc, 0x40, -	0x10, 0x36, 0x50, 0x7a, -	0x05, 0x38, 0x46, 0x31, -	0x04, 0x14, 0x58, 0x31, -	0x03, 0xa9, 0x60, 0x31, -	0xa3, 0x6a, 0xcc, 0x00, -	0x38, 0x6a, 0xcc, 0x5b, -	0xac, 0x6a, 0xcc, 0x00, -	0x14, 0x6a, 0xce, 0x5b, -	0xa9, 0x6a, 0xd0, 0x5b, -	0x00, 0x65, 0x50, 0x42, -	0xef, 0x36, 0x6c, 0x08, -	0x00, 0x65, 0x50, 0x42, -	0x0f, 0x64, 0xc8, 0x08, -	0x07, 0x64, 0xc8, 0x08, -	0x00, 0x37, 0x6e, 0x00, -	0xff, 0x6a, 0xa4, 0x00, -	0x00, 0x65, 0x60, 0x5b, -	0xff, 0x51, 0xbc, 0x72, -	0x20, 0x36, 0xc6, 0x7a, -	0x00, 0x90, 0x4e, 0x5b, -	0x00, 0x65, 0xc8, 0x42, -	0xff, 0x06, 0xd4, 0x08, -	0x00, 0x65, 0xba, 0x5b, -	0xe0, 0x3d, 0xe2, 0x62, -	0x20, 0x12, 0xe2, 0x62, -	0x51, 0x6a, 0xf6, 0x5a, -	0x00, 0x65, 0x48, 0x5b, -	0xff, 0x37, 0xc8, 0x08, -	0x00, 0xa1, 0xda, 0x62, -	0x04, 0xa0, 0xda, 0x7a, -	0xfb, 0xa0, 0x40, 0x09, -	0x80, 0x36, 0x6c, 0x00, -	0x80, 0xa0, 0x50, 0x7a, -	0x7f, 0xa0, 0x40, 0x09, -	0xff, 0x6a, 0xf2, 0x5a, -	0x00, 0x65, 0x50, 0x42, -	0x04, 0xa0, 0xe0, 0x7a, -	0x00, 0x65, 0x98, 0x5c, -	0x00, 0x65, 0xe2, 0x42, -	0x00, 0x65, 0x80, 0x5c, -	0x31, 0x6a, 0x22, 0x01, -	0x0c, 0x6a, 0xf2, 0x5a, -	0x00, 0x65, 0x50, 0x42, -	0x61, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0x50, 0x42, -	0x51, 0x6a, 0xf6, 0x5a, -	0x51, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0x50, 0x42, -	0x10, 0x3d, 0x06, 0x00, -	0xff, 0x65, 0x68, 0x0c, -	0xff, 0x06, 0xd4, 0x08, -	0x01, 0x0c, 0xf8, 0x7a, -	0x04, 0x0c, 0xfa, 0x6a, -	0xe0, 0x03, 0x7a, 0x08, -	0xe0, 0x3d, 0x06, 0x63, -	0xff, 0x65, 0xcc, 0x08, -	0xff, 0x12, 0xda, 0x0c, -	0xff, 0x06, 0xd4, 0x0c, -	0xd1, 0x6a, 0x22, 0x01, -	0x00, 0x65, 0xaa, 0x40, -	0xff, 0x65, 0x26, 0x09, -	0x01, 0x0b, 0x1a, 0x6b, -	0x10, 0x0c, 0x0c, 0x7b, -	0x04, 0x0b, 0x14, 0x6b, -	0xff, 0x6a, 0xca, 0x08, -	0x04, 0x93, 0x18, 0x6b, -	0x01, 0x94, 0x16, 0x7b, -	0x10, 0x94, 0x18, 0x6b, -	0x80, 0x3d, 0x1e, 0x73, -	0x0f, 0x04, 0x22, 0x6b, -	0x02, 0x03, 0x22, 0x7b, -	0x11, 0x0c, 0x1e, 0x7b, -	0xc7, 0x93, 0x26, 0x09, -	0xff, 0x99, 0xd4, 0x08, -	0x38, 0x93, 0x24, 0x6b, -	0xff, 0x6a, 0xd4, 0x0c, -	0x80, 0x36, 0x28, 0x6b, -	0x21, 0x6a, 0x22, 0x05, -	0xff, 0x65, 0x20, 0x09, -	0xff, 
0x51, 0x36, 0x63, -	0xff, 0x37, 0xc8, 0x08, -	0xa1, 0x6a, 0x42, 0x43, -	0xff, 0x51, 0xc8, 0x08, -	0xb9, 0x6a, 0x42, 0x43, -	0xff, 0x90, 0xa4, 0x08, -	0xff, 0xba, 0x46, 0x73, -	0xff, 0xba, 0x20, 0x09, -	0xff, 0x65, 0xca, 0x18, -	0x00, 0x6c, 0x3a, 0x63, -	0xff, 0x90, 0xca, 0x0c, -	0xff, 0x6a, 0xca, 0x04, -	0x20, 0x36, 0x5a, 0x7b, -	0x00, 0x90, 0x2e, 0x5b, -	0xff, 0x65, 0x5a, 0x73, -	0xff, 0x52, 0x58, 0x73, -	0xff, 0xba, 0xcc, 0x08, -	0xff, 0x52, 0x20, 0x09, -	0xff, 0x66, 0x74, 0x09, -	0xff, 0x65, 0x20, 0x0d, -	0xff, 0xba, 0x7e, 0x0c, -	0x00, 0x6a, 0x86, 0x5c, -	0x0d, 0x6a, 0x6a, 0x00, -	0x00, 0x51, 0xe6, 0x43, -	0xff, 0x3f, 0xb4, 0x73, -	0xff, 0x6a, 0xa2, 0x00, -	0x00, 0x3f, 0x2e, 0x5b, -	0xff, 0x65, 0xb4, 0x73, -	0x20, 0x36, 0x6c, 0x00, -	0x20, 0xa0, 0x6e, 0x6b, -	0xff, 0xb9, 0xa2, 0x0c, -	0xff, 0x6a, 0xa2, 0x04, -	0xff, 0x65, 0xa4, 0x08, -	0xe0, 0x6a, 0xcc, 0x00, -	0x45, 0x6a, 0xda, 0x5b, -	0x01, 0x6a, 0xd0, 0x01, -	0x09, 0x6a, 0xd6, 0x01, -	0x80, 0xeb, 0x7a, 0x7b, -	0x01, 0x6a, 0xd6, 0x01, -	0x01, 0xe9, 0xa4, 0x34, -	0x88, 0x6a, 0xcc, 0x00, -	0x45, 0x6a, 0xda, 0x5b, -	0x01, 0x6a, 0x18, 0x01, -	0xff, 0x6a, 0x1a, 0x09, -	0xff, 0x6a, 0x1c, 0x09, -	0x0d, 0x6a, 0x26, 0x01, -	0x00, 0x65, 0x78, 0x5c, -	0xff, 0x99, 0xa4, 0x0c, -	0xff, 0x65, 0xa4, 0x08, -	0xe0, 0x6a, 0xcc, 0x00, -	0x45, 0x6a, 0xda, 0x5b, -	0x01, 0x6a, 0xd0, 0x01, -	0x01, 0x6a, 0xdc, 0x05, -	0x88, 0x6a, 0xcc, 0x00, -	0x45, 0x6a, 0xda, 0x5b, -	0x01, 0x6a, 0x18, 0x01, -	0xff, 0x6a, 0x1a, 0x09, -	0xff, 0x6a, 0x1c, 0x09, -	0x01, 0x6a, 0x26, 0x05, -	0x01, 0x65, 0xd8, 0x31, -	0x09, 0xee, 0xdc, 0x01, -	0x80, 0xee, 0xaa, 0x7b, -	0xff, 0x6a, 0xdc, 0x0d, -	0xff, 0x65, 0x32, 0x09, -	0x0a, 0x93, 0x26, 0x01, -	0x00, 0x65, 0x78, 0x44, -	0xff, 0x37, 0xc8, 0x08, -	0x00, 0x6a, 0x70, 0x5b, -	0xff, 0x52, 0xa2, 0x0c, -	0x01, 0x0c, 0xba, 0x7b, -	0x04, 0x0c, 0xba, 0x6b, -	0xe0, 0x03, 0x06, 0x08, -	0xe0, 0x03, 0x7a, 0x0c, -	0xff, 0x8c, 0x10, 0x08, -	0xff, 0x8d, 0x12, 0x08, -	0xff, 0x8e, 0x14, 0x0c, -	0xff, 0x6c, 0xda, 0x08, -	0xff, 0x6c, 0xda, 0x08, -	0xff, 0x6c, 0xda, 0x08, -	0xff, 0x6c, 0xda, 0x08, -	0xff, 0x6c, 0xda, 0x08, -	0xff, 0x6c, 0xda, 0x08, -	0xff, 0x6c, 0xda, 0x0c, -	0x3d, 0x64, 0xa4, 0x28, -	0x55, 0x64, 0xc8, 0x28, -	0x00, 0x6c, 0xda, 0x18, -	0xff, 0x52, 0xc8, 0x08, -	0x00, 0x6c, 0xda, 0x20, -	0xff, 0x6a, 0xc8, 0x08, -	0x00, 0x6c, 0xda, 0x20, -	0x00, 0x6c, 0xda, 0x24, -	0xff, 0x65, 0xc8, 0x08, -	0xe0, 0x6a, 0xcc, 0x00, -	0x41, 0x6a, 0xd6, 0x5b, -	0xff, 0x90, 0xe2, 0x09, -	0x20, 0x6a, 0xd0, 0x01, -	0x04, 0x35, 0xf8, 0x7b, -	0x1d, 0x6a, 0xdc, 0x01, -	0xdc, 0xee, 0xf4, 0x63, -	0x00, 0x65, 0x0e, 0x44, -	0x01, 0x6a, 0xdc, 0x01, -	0x20, 0xa0, 0xd8, 0x31, -	0x09, 0xee, 0xdc, 0x01, -	0x80, 0xee, 0xfe, 0x7b, -	0x11, 0x6a, 0xdc, 0x01, -	0x50, 0xee, 0x02, 0x64, -	0x20, 0x6a, 0xd0, 0x01, -	0x09, 0x6a, 0xdc, 0x01, -	0x88, 0xee, 0x08, 0x64, -	0x19, 0x6a, 0xdc, 0x01, -	0xd8, 0xee, 0x0c, 0x64, -	0xff, 0x6a, 0xdc, 0x09, -	0x18, 0xee, 0x10, 0x6c, -	0xff, 0x6a, 0xd4, 0x0c, -	0x88, 0x6a, 0xcc, 0x00, -	0x41, 0x6a, 0xd6, 0x5b, -	0x20, 0x6a, 0x18, 0x01, -	0xff, 0x6a, 0x1a, 0x09, -	0xff, 0x6a, 0x1c, 0x09, -	0xff, 0x35, 0x26, 0x09, -	0x04, 0x35, 0x3c, 0x6c, -	0xa0, 0x6a, 0xca, 0x00, -	0x20, 0x65, 0xc8, 0x18, -	0xff, 0x6c, 0x32, 0x09, -	0xff, 0x6c, 0x32, 0x09, -	0xff, 0x6c, 0x32, 0x09, -	0xff, 0x6c, 0x32, 0x09, -	0xff, 0x6c, 0x32, 0x09, -	0xff, 0x6c, 0x32, 0x09, -	0xff, 0x6c, 0x32, 0x09, -	0xff, 0x6c, 0x32, 0x09, -	0x00, 0x65, 0x26, 0x64, -	0x0a, 0x93, 0x26, 0x01, -	0x00, 0x65, 0x78, 0x44, -	0xa0, 0x6a, 0xcc, 0x00, -	0xe8, 0x6a, 0xc8, 0x00, -	0x01, 0x94, 0x40, 0x6c, 
-	0x10, 0x94, 0x42, 0x6c, -	0x08, 0x94, 0x54, 0x6c, -	0x08, 0x94, 0x54, 0x6c, -	0x08, 0x94, 0x54, 0x6c, -	0x00, 0x65, 0x68, 0x5c, -	0x08, 0x64, 0xc8, 0x18, -	0x00, 0x8c, 0xca, 0x18, -	0x00, 0x65, 0x4a, 0x4c, -	0x00, 0x65, 0x40, 0x44, -	0xf7, 0x93, 0x26, 0x09, -	0x08, 0x93, 0x56, 0x6c, -	0x00, 0x65, 0x68, 0x5c, -	0x08, 0x64, 0xc8, 0x18, -	0x08, 0x64, 0x58, 0x64, -	0xff, 0x6a, 0xd4, 0x0c, -	0x00, 0x65, 0x78, 0x5c, -	0x00, 0x65, 0x68, 0x5c, -	0x00, 0x65, 0x68, 0x5c, -	0x00, 0x65, 0x68, 0x5c, -	0xff, 0x99, 0xda, 0x08, -	0xff, 0x99, 0xda, 0x08, -	0xff, 0x99, 0xda, 0x08, -	0xff, 0x99, 0xda, 0x08, -	0xff, 0x99, 0xda, 0x08, -	0xff, 0x99, 0xda, 0x08, -	0xff, 0x99, 0xda, 0x08, -	0xff, 0x99, 0xda, 0x0c, -	0x08, 0x94, 0x78, 0x7c, -	0xf7, 0x93, 0x26, 0x09, -	0x08, 0x93, 0x7c, 0x6c, -	0xff, 0x6a, 0xd4, 0x0c, -	0xff, 0x40, 0x74, 0x09, -	0xff, 0x90, 0x80, 0x08, -	0xff, 0x6a, 0x72, 0x05, -	0xff, 0x40, 0x94, 0x64, -	0xff, 0x3f, 0x8c, 0x64, -	0xff, 0x6a, 0xca, 0x04, -	0xff, 0x3f, 0x20, 0x09, -	0x01, 0x6a, 0x6a, 0x00, -	0x00, 0xb9, 0xe6, 0x5b, -	0xff, 0xba, 0x7e, 0x0c, -	0xff, 0x40, 0x20, 0x09, -	0xff, 0xba, 0x80, 0x0c, -	0xff, 0x3f, 0x74, 0x09, -	0xff, 0x90, 0x7e, 0x0c, -}; - -static int aic7xxx_patch15_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch15_func(struct aic7xxx_host *p) -{ -	return ((p->bugs & AHC_BUG_SCBCHAN_UPLOAD) != 0); -} - -static int aic7xxx_patch14_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch14_func(struct aic7xxx_host *p) -{ -	return ((p->bugs & AHC_BUG_PCI_2_1_RETRY) != 0); -} - -static int aic7xxx_patch13_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch13_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_WIDE) != 0); -} - -static int aic7xxx_patch12_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch12_func(struct aic7xxx_host *p) -{ -	return ((p->bugs & AHC_BUG_AUTOFLUSH) != 0); -} - -static int aic7xxx_patch11_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch11_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_ULTRA2) == 0); -} - -static int aic7xxx_patch10_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch10_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_CMD_CHAN) == 0); -} - -static int aic7xxx_patch9_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch9_func(struct aic7xxx_host *p) -{ -	return ((p->chip & AHC_CHIPID_MASK) == AHC_AIC7895); -} - -static int aic7xxx_patch8_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch8_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_ULTRA) != 0); -} - -static int aic7xxx_patch7_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch7_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_ULTRA2) != 0); -} - -static int aic7xxx_patch6_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch6_func(struct aic7xxx_host *p) -{ -	return ((p->flags & AHC_PAGESCBS) == 0); -} - -static int aic7xxx_patch5_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch5_func(struct aic7xxx_host *p) -{ -	return ((p->flags & AHC_PAGESCBS) != 0); -} - -static int aic7xxx_patch4_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch4_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_QUEUE_REGS) != 0); -} - -static int aic7xxx_patch3_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch3_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_TWIN) != 0); -} - -static int aic7xxx_patch2_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch2_func(struct aic7xxx_host *p) -{ -	return ((p->features & 
AHC_QUEUE_REGS) == 0); -} - -static int aic7xxx_patch1_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch1_func(struct aic7xxx_host *p) -{ -	return ((p->features & AHC_CMD_CHAN) != 0); -} - -static int aic7xxx_patch0_func(struct aic7xxx_host *p); - -static int -aic7xxx_patch0_func(struct aic7xxx_host *p) -{ -	return (0); -} - -struct sequencer_patch { -	int		(*patch_func)(struct aic7xxx_host *); -	unsigned int	begin	   :10, -			skip_instr :10, -			skip_patch :12; -} sequencer_patches[] = { -	{ aic7xxx_patch1_func, 3, 2, 1 }, -	{ aic7xxx_patch2_func, 7, 1, 1 }, -	{ aic7xxx_patch2_func, 8, 1, 1 }, -	{ aic7xxx_patch3_func, 11, 4, 1 }, -	{ aic7xxx_patch4_func, 16, 3, 2 }, -	{ aic7xxx_patch0_func, 19, 4, 1 }, -	{ aic7xxx_patch5_func, 23, 1, 1 }, -	{ aic7xxx_patch6_func, 26, 1, 1 }, -	{ aic7xxx_patch1_func, 29, 1, 2 }, -	{ aic7xxx_patch0_func, 30, 3, 1 }, -	{ aic7xxx_patch3_func, 39, 4, 1 }, -	{ aic7xxx_patch7_func, 43, 3, 2 }, -	{ aic7xxx_patch0_func, 46, 3, 1 }, -	{ aic7xxx_patch8_func, 52, 7, 1 }, -	{ aic7xxx_patch3_func, 60, 3, 1 }, -	{ aic7xxx_patch7_func, 63, 2, 1 }, -	{ aic7xxx_patch7_func, 102, 1, 2 }, -	{ aic7xxx_patch0_func, 103, 2, 1 }, -	{ aic7xxx_patch7_func, 107, 2, 1 }, -	{ aic7xxx_patch9_func, 109, 1, 1 }, -	{ aic7xxx_patch10_func, 110, 2, 1 }, -	{ aic7xxx_patch7_func, 113, 1, 2 }, -	{ aic7xxx_patch0_func, 114, 1, 1 }, -	{ aic7xxx_patch1_func, 118, 1, 1 }, -	{ aic7xxx_patch1_func, 121, 3, 3 }, -	{ aic7xxx_patch11_func, 123, 1, 1 }, -	{ aic7xxx_patch0_func, 124, 5, 1 }, -	{ aic7xxx_patch7_func, 132, 1, 1 }, -	{ aic7xxx_patch9_func, 133, 1, 1 }, -	{ aic7xxx_patch10_func, 134, 3, 1 }, -	{ aic7xxx_patch7_func, 137, 3, 2 }, -	{ aic7xxx_patch0_func, 140, 2, 1 }, -	{ aic7xxx_patch7_func, 142, 5, 2 }, -	{ aic7xxx_patch0_func, 147, 3, 1 }, -	{ aic7xxx_patch7_func, 150, 1, 2 }, -	{ aic7xxx_patch0_func, 151, 2, 1 }, -	{ aic7xxx_patch1_func, 153, 15, 4 }, -	{ aic7xxx_patch11_func, 166, 1, 2 }, -	{ aic7xxx_patch0_func, 167, 1, 1 }, -	{ aic7xxx_patch0_func, 168, 10, 1 }, -	{ aic7xxx_patch7_func, 181, 1, 2 }, -	{ aic7xxx_patch0_func, 182, 2, 1 }, -	{ aic7xxx_patch7_func, 184, 18, 1 }, -	{ aic7xxx_patch1_func, 202, 3, 3 }, -	{ aic7xxx_patch7_func, 204, 1, 1 }, -	{ aic7xxx_patch0_func, 205, 4, 1 }, -	{ aic7xxx_patch7_func, 210, 2, 1 }, -	{ aic7xxx_patch7_func, 215, 13, 3 }, -	{ aic7xxx_patch12_func, 218, 1, 1 }, -	{ aic7xxx_patch12_func, 219, 4, 1 }, -	{ aic7xxx_patch1_func, 229, 3, 3 }, -	{ aic7xxx_patch11_func, 231, 1, 1 }, -	{ aic7xxx_patch0_func, 232, 5, 1 }, -	{ aic7xxx_patch11_func, 237, 1, 2 }, -	{ aic7xxx_patch0_func, 238, 9, 1 }, -	{ aic7xxx_patch13_func, 254, 1, 2 }, -	{ aic7xxx_patch0_func, 255, 1, 1 }, -	{ aic7xxx_patch4_func, 316, 1, 2 }, -	{ aic7xxx_patch0_func, 317, 1, 1 }, -	{ aic7xxx_patch2_func, 320, 1, 1 }, -	{ aic7xxx_patch1_func, 330, 3, 2 }, -	{ aic7xxx_patch0_func, 333, 5, 1 }, -	{ aic7xxx_patch13_func, 341, 1, 2 }, -	{ aic7xxx_patch0_func, 342, 1, 1 }, -	{ aic7xxx_patch5_func, 347, 1, 1 }, -	{ aic7xxx_patch11_func, 389, 15, 2 }, -	{ aic7xxx_patch14_func, 402, 1, 1 }, -	{ aic7xxx_patch1_func, 441, 7, 2 }, -	{ aic7xxx_patch0_func, 448, 8, 1 }, -	{ aic7xxx_patch1_func, 457, 4, 2 }, -	{ aic7xxx_patch0_func, 461, 6, 1 }, -	{ aic7xxx_patch1_func, 467, 4, 2 }, -	{ aic7xxx_patch0_func, 471, 3, 1 }, -	{ aic7xxx_patch10_func, 481, 10, 1 }, -	{ aic7xxx_patch1_func, 500, 22, 5 }, -	{ aic7xxx_patch11_func, 508, 4, 1 }, -	{ aic7xxx_patch7_func, 512, 7, 3 }, -	{ aic7xxx_patch15_func, 512, 5, 2 }, -	{ aic7xxx_patch0_func, 517, 2, 1 }, -	{ aic7xxx_patch10_func, 522, 50, 3 }, -	{ 
aic7xxx_patch14_func, 543, 17, 2 },
-	{ aic7xxx_patch0_func, 560, 4, 1 },
-	{ aic7xxx_patch10_func, 572, 4, 1 },
-	{ aic7xxx_patch5_func, 576, 2, 1 },
-	{ aic7xxx_patch5_func, 579, 9, 1 },
-
-};
diff --git a/drivers/scsi/aic7xxx_old/scsi_message.h b/drivers/scsi/aic7xxx_old/scsi_message.h
deleted file mode 100644
index a79f89c6517..00000000000
--- a/drivers/scsi/aic7xxx_old/scsi_message.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Messages (1 byte) */		     /* I/T (M)andatory or (O)ptional */
-#define MSG_CMDCOMPLETE		0x00 /* M/M */
-#define MSG_EXTENDED		0x01 /* O/O */
-#define MSG_SAVEDATAPOINTER	0x02 /* O/O */
-#define MSG_RESTOREPOINTERS	0x03 /* O/O */
-#define MSG_DISCONNECT		0x04 /* O/O */
-#define MSG_INITIATOR_DET_ERR	0x05 /* M/M */
-#define MSG_ABORT		0x06 /* O/M */
-#define MSG_MESSAGE_REJECT	0x07 /* M/M */
-#define MSG_NOOP		0x08 /* M/M */
-#define MSG_PARITY_ERROR	0x09 /* M/M */
-#define MSG_LINK_CMD_COMPLETE	0x0a /* O/O */
-#define MSG_LINK_CMD_COMPLETEF	0x0b /* O/O */
-#define MSG_BUS_DEV_RESET	0x0c /* O/M */
-#define MSG_ABORT_TAG		0x0d /* O/O */
-#define MSG_CLEAR_QUEUE		0x0e /* O/O */
-#define MSG_INIT_RECOVERY	0x0f /* O/O */
-#define MSG_REL_RECOVERY	0x10 /* O/O */
-#define MSG_TERM_IO_PROC	0x11 /* O/O */
-
-/* Messages (2 byte) */
-#define MSG_SIMPLE_Q_TAG	0x20 /* O/O */
-#define MSG_HEAD_OF_Q_TAG	0x21 /* O/O */
-#define MSG_ORDERED_Q_TAG	0x22 /* O/O */
-#define MSG_IGN_WIDE_RESIDUE	0x23 /* O/O */
-
-/* Identify message */		     /* M/M */
-#define MSG_IDENTIFYFLAG	0x80
-#define MSG_IDENTIFY_DISCFLAG	0x40
-#define MSG_IDENTIFY(lun, disc)	(((disc) ? 0xc0 : MSG_IDENTIFYFLAG) | (lun))
-#define MSG_ISIDENTIFY(m)	((m) & MSG_IDENTIFYFLAG)
-
-/* Extended messages (opcode and length) */
-#define MSG_EXT_SDTR		0x01
-#define MSG_EXT_SDTR_LEN	0x03
-
-#define MSG_EXT_WDTR		0x03
-#define MSG_EXT_WDTR_LEN	0x02
-#define MSG_EXT_WDTR_BUS_8_BIT	0x00
-#define MSG_EXT_WDTR_BUS_16_BIT	0x01
-#define MSG_EXT_WDTR_BUS_32_BIT	0x02
-
-#define MSG_EXT_PPR     0x04
-#define MSG_EXT_PPR_LEN	0x06
-#define MSG_EXT_PPR_OPTION_ST 0x00
-#define MSG_EXT_PPR_OPTION_DT_CRC 0x02
-#define MSG_EXT_PPR_OPTION_DT_UNITS 0x03
-#define MSG_EXT_PPR_OPTION_DT_CRC_QUICK 0x04
-#define MSG_EXT_PPR_OPTION_DT_UNITS_QUICK 0x05
diff --git a/drivers/scsi/aic7xxx_old/sequencer.h b/drivers/scsi/aic7xxx_old/sequencer.h
deleted file mode 100644
index ee66855222b..00000000000
--- a/drivers/scsi/aic7xxx_old/sequencer.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Instruction formats for the sequencer program downloaded to
- * Aic7xxx SCSI host adapters
- *
- * Copyright (c) 1997, 1998 Justin T. Gibbs.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions, and the following disclaimer,
- *    without modification, immediately at the beginning of the file.
- * 2. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
- *
- * Where this Software is combined with software released under the terms of
- * the GNU General Public License ("GPL") and the terms of the GPL would require the
- * combined work to also be released under the terms of the GPL, the terms
- * and conditions of this License will apply in addition to those of the
- * GPL with the exception of any terms or conditions of this License that
- * conflict with, or are expressly prohibited by, the GPL.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- *      $Id: sequencer.h,v 1.3 1997/09/27 19:37:31 gibbs Exp $
- */
-
-#ifdef __LITTLE_ENDIAN_BITFIELD
-struct ins_format1 {
-	unsigned int
-			immediate	: 8,
-			source		: 9,
-			destination	: 9,
-			ret		: 1,
-			opcode		: 4,
-			parity		: 1;
-};
-
-struct ins_format2 {
-	unsigned int
-			shift_control	: 8,
-			source		: 9,
-			destination	: 9,
-			ret		: 1,
-			opcode		: 4,
-			parity		: 1;
-};
-
-struct ins_format3 {
-	unsigned int
-			immediate	: 8,
-			source		: 9,
-			address		: 10,
-			opcode		: 4,
-			parity		: 1;
-};
-#elif defined(__BIG_ENDIAN_BITFIELD)
-struct ins_format1 {
-	unsigned int
-			parity		: 1,
-			opcode		: 4,
-			ret		: 1,
-			destination	: 9,
-			source		: 9,
-			immediate	: 8;
-};
-
-struct ins_format2 {
-	unsigned int
-			parity		: 1,
-			opcode		: 4,
-			ret		: 1,
-			destination	: 9,
-			source		: 9,
-			shift_control	: 8;
-};
-
-struct ins_format3 {
-	unsigned int
-			parity		: 1,
-			opcode		: 4,
-			address		: 10,
-			source		: 9,
-			immediate	: 8;
-};
-#endif
-
-union ins_formats {
-		struct ins_format1 format1;
-		struct ins_format2 format2;
-		struct ins_format3 format3;
-		unsigned char	   bytes[4];
-		unsigned int	   integer;
-};
-struct instruction {
-	union	ins_formats format;
-	unsigned int	srcline;
-	struct symbol *patch_label;
-  struct {
-    struct instruction *stqe_next;
-  } links;
-};
-
-#define	AIC_OP_OR	0x0
-#define	AIC_OP_AND	0x1
-#define AIC_OP_XOR	0x2
-#define	AIC_OP_ADD	0x3
-#define	AIC_OP_ADC	0x4
-#define	AIC_OP_ROL	0x5
-#define	AIC_OP_BMOV	0x6
-
-#define	AIC_OP_JMP	0x8
-#define AIC_OP_JC	0x9
-#define AIC_OP_JNC	0xa
-#define AIC_OP_CALL	0xb
-#define	AIC_OP_JNE	0xc
-#define	AIC_OP_JNZ	0xd
-#define	AIC_OP_JE	0xe
-#define	AIC_OP_JZ	0xf
-
-/* Pseudo Ops */
-#define	AIC_OP_SHL	0x10
-#define	AIC_OP_SHR	0x20
-#define	AIC_OP_ROR	0x30
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 33c52bc2c7b..652b41b4ddb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -137,6 +137,7 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= arcmsr_host_attrs,
+	.no_write_same		= 1,
 };

 static struct pci_device_id arcmsr_device_id_table[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
@@ -1035,7 +1036,6 @@ static void arcmsr_remove(struct pci_dev *pdev)
 	pci_release_regions(pdev);
 	scsi_host_put(host);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 }

 static void arcmsr_shutdown(struct pci_dev *pdev)
@@ -2500,16 +2500,15 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
 {
 	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
-	dma_addr_t dma_coherent_handle;
+
 	/*
 	********************************************************************
 	** here we need to tell iop 331 our freeccb.HighPart
 	** if freeccb.HighPart is not zero
 	********************************************************************
 	*/
-	dma_coherent_handle = acb->dma_coherent_handle;
-	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
-	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
+	cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
+	cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
 	/*
 	***********************************************************************
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 09ba1869d36..2e797a36760 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -62,13 +62,6 @@
  */
 #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
 /*
- * SCSI-II Linked command support.
- *
- * The higher level code doesn't support linked commands yet, and so the option
- * is undef'd here.
- */
-#undef CONFIG_SCSI_ACORNSCSI_LINK
-/*
  * SCSI-II Synchronous transfer support.
  *
  * Tried and tested...
@@ -160,10 +153,6 @@
 #error "Yippee!  ABORT TAG is now defined!  Remove this error!"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-#error SCSI2 LINKed commands not supported (yet)!
-#endif
-

 #ifdef USE_DMAC
 /*
  * DMAC setup parameters
@@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host)
 	}
 	break;
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-    case LINKED_CMD_COMPLETE:
-    case LINKED_FLG_CMD_COMPLETE:
-	/*
-	 * We don't support linked commands yet
-	 */
-	if (0) {
-#if (DEBUG & DEBUG_LINK)
-	    printk("scsi%d.%c: lun %d tag %d linked command complete\n",
-		    host->host->host_no, acornscsi_target(host), host->SCpnt->tag);
-#endif
-	    /*
-	     * A linked command should only terminate with one of these messages
-	     * if there are more linked commands available.
-	     */
-	    if (!host->SCpnt->next_link) {
-		printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n",
-			instance->host_no, acornscsi_target(host), host->SCpnt->tag);
-		acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
-		msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
-	    } else {
-		struct scsi_cmnd *SCpnt = host->SCpnt;
-
-		acornscsi_dma_cleanup(host);
-
-		host->SCpnt = host->SCpnt->next_link;
-		host->SCpnt->tag = SCpnt->tag;
-		SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status;
-		SCpnt->done(SCpnt);
-
-		/* initialise host->SCpnt->SCp */
-	    }
-	    break;
-	}
-#endif
-
      default: /* reject message */
 	printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n",
 		host->host->host_no, acornscsi_target(host),
@@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host)
 #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
     " TAG"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-    " LINK"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2851,9 +2801,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
 #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
     " TAG"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-    " LINK"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2971,7 +2918,7 @@ static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 	ec->irqaddr	= ashost->fast + INT_REG;
 	ec->irqmask	= 0x0a;

-	ret = request_irq(host->irq, acornscsi_intr, IRQF_DISABLED, "acornscsi", ashost);
+	ret = request_irq(host->irq, acornscsi_intr, 0, "acornscsi", ashost);
 	if (ret) {
 		printk(KERN_CRIT "scsi%d: IRQ%d not free: %d\n",
 			host->host_no, ashost->scsi.irq, ret);
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index b679778376c..8ef810a4476 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -36,9 +36,6 @@
 	void __iomem *base;		\
 	void __iomem *dma
-#define BOARD_NORMAL	0
-#define BOARD_NCR53C400	1
-
 #include "../NCR5380.h"

 void cumanascsi_setup(char *str, int *ints)
@@ -262,7 +259,7 @@ static int cumanascsi1_probe(struct expansion_card *ec,
 		goto out_unmap;
 	}

-	ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
+	ret = request_irq(host->irq, cumanascsi_intr, 0,
 			  "CumanaSCSI-1", host);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 58915f29055..abc66f5263e 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -431,7 +431,7 @@ static int cumanascsi2_probe(struct expansion_card *ec,
 		goto out_free;

 	ret = request_irq(ec->irq, cumanascsi_2_intr,
-			  IRQF_DISABLED, "cumanascsi2", info);
+			  0, "cumanascsi2", info);
 	if (ret) {
 		printk("scsi%d: IRQ%d not free: %d\n",
 		       host->host_no, ec->irq, ret);
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 4266eef8aca..188e734c7ff 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -37,9 +37,6 @@
 #define NCR5380_implementation_fields	\
 	void __iomem *base
-#define BOARD_NORMAL	0
-#define BOARD_NCR53C400	1
-
 #include "../NCR5380.h"

 #undef START_DMA_INITIATOR_RECEIVE_REG
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index abc9593615e..5e1b73e1b74 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -358,7 +358,7 @@ static int powertecscsi_probe(struct
expansion_card *ec,  		goto out_free;  	ret = request_irq(ec->irq, powertecscsi_intr, -			  IRQF_DISABLED, "powertec", info); +			  0, "powertec", info);  	if (ret) {  		printk("scsi%d: IRQ%d not free: %d\n",  		       host->host_no, ec->irq, ret); diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 0f3cdbc80ba..1814aa20b72 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)  		return 0;  	if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=  	    TagAlloc[cmd->device->id][cmd->device->lun].queue_size) { -		TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n", +		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",  			   H_NO(cmd), cmd->device->id, cmd->device->lun);  		return 1;  	} @@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)  	    !setup_use_tagged_queuing || !cmd->device->tagged_supported) {  		cmd->tag = TAG_NONE;  		hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); -		TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged " +		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "  			   "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);  	} else {  		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; @@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)  		cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);  		set_bit(cmd->tag, ta->allocated);  		ta->nr_allocated++; -		TAG_PRINTK("scsi%d: using tag %d for target %d lun %d " +		dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "  			   "(now %d tags in use)\n",  			   H_NO(cmd), cmd->tag, cmd->device->id,  			   cmd->device->lun, ta->nr_allocated); @@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)  	if (cmd->tag == TAG_NONE) {  		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -		TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n", +		dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",  			   H_NO(cmd), cmd->device->id, cmd->device->lun);  	} else if (cmd->tag >= MAX_TAGS) {  		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n", @@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)  		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];  		clear_bit(cmd->tag, ta->allocated);  		ta->nr_allocated--; -		TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n", +		dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",  			   H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);  	}  } @@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)  	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;  	     cmd->SCp.buffers_residual &&  	     virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) { -		MER_PRINTK("VTOP(%p) == %08lx -> merging\n", +		dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",  			   page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);  #if (NDEBUG & NDEBUG_MERGING)  		++cnt; @@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)  	}  #if (NDEBUG & NDEBUG_MERGING)  	if (oldlen != cmd->SCp.this_residual) -		MER_PRINTK("merged %d buffers from %p, new length %08x\n", +		dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",  			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);  #endif  } @@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host 
*instance)  	}  } -#else /* !NDEBUG */ - -/* dummies... */ -static inline void NCR5380_print(struct Scsi_Host *instance) -{ -}; -static inline void NCR5380_print_phase(struct Scsi_Host *instance) -{ -}; -  #endif  /* @@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void)  {  	static int done = 0;  	if (!done) { -		INI_PRINTK("scsi : NCR5380_all_init()\n"); +		dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");  		done = 1;  	}  } @@ -739,8 +729,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)  	Scsi_Cmnd *ptr;  	unsigned long flags; -	NCR_PRINT(NDEBUG_ANY); -	NCR_PRINT_PHASE(NDEBUG_ANY); +	NCR5380_dprint(NDEBUG_ANY, instance); +	NCR5380_dprint_phase(NDEBUG_ANY, instance);  	hostdata = (struct NCR5380_hostdata *)instance->hostdata; @@ -984,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))  	}  	local_irq_restore(flags); -	QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), +	dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),  		  (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");  	/* If queue_command() is called from an interrupt (real one or bottom @@ -1054,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work)  		done = 1;  		if (!hostdata->connected) { -			MAIN_PRINTK("scsi%d: not connected\n", HOSTNO); +			dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);  			/*  			 * Search through the issue_queue for a command destined  			 * for a target that's not busy. @@ -1107,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work)  					 * On failure, we must add the command back to the  					 *   issue queue so we can keep trying.  					 */ -					MAIN_PRINTK("scsi%d: main(): command for target %d " +					dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "  						    "lun %d removed from issue_queue\n",  						    HOSTNO, tmp->device->id, tmp->device->lun);  					/* @@ -1140,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work)  #endif  						falcon_dont_release--;  						local_irq_restore(flags); -						MAIN_PRINTK("scsi%d: main(): select() failed, " +						dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "  							    "returned to issue_queue\n", HOSTNO);  						if (hostdata->connected)  							break; @@ -1155,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work)  #endif  		    ) {  			local_irq_restore(flags); -			MAIN_PRINTK("scsi%d: main: performing information transfer\n", +			dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",  				    HOSTNO);  			NCR5380_information_transfer(instance); -			MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); +			dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);  			done = 0;  		}  	} while (!done); @@ -1204,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)  			    (BASR_PHASE_MATCH|BASR_ACK)) {  				saved_data = NCR5380_read(INPUT_DATA_REG);  				overrun = 1; -				DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO); +				dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);  			}  		}  	} -	DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", +	dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",  		   HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),  		   NCR5380_read(STATUS_REG)); @@ -1229,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)  		if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {  			cnt = toPIO = atari_read_overruns;  			if 
(overrun) { -				DMA_PRINTK("Got an input overrun, using saved byte\n"); +				dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");  				*(*data)++ = saved_data;  				(*count)--;  				cnt--;  				toPIO--;  			} -			DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); +			dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);  			NCR5380_transfer_pio(instance, &p, &cnt, data);  			*count -= toPIO - cnt;  		} @@ -1261,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)  	int done = 1, handled = 0;  	unsigned char basr; -	INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); +	dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);  	/* Look for pending interrupts */  	basr = NCR5380_read(BUS_AND_STATUS_REG); -	INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); +	dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);  	/* dispatch to appropriate routine if found and done=0 */  	if (basr & BASR_IRQ) { -		NCR_PRINT(NDEBUG_INTR); +		NCR5380_dprint(NDEBUG_INTR, instance);  		if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {  			done = 0;  			ENABLE_IRQ(); -			INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); +			dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);  			NCR5380_reselect(instance);  			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);  		} else if (basr & BASR_PARITY_ERROR) { -			INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); +			dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);  			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);  		} else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { -			INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); +			dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);  			(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);  		} else {  			/* @@ -1298,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)  			    ((basr & BASR_END_DMA_TRANSFER) ||  			     !(basr & BASR_PHASE_MATCH))) { -				INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); +				dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);  				NCR5380_dma_complete( instance );  				done = 0;  				ENABLE_IRQ(); @@ -1323,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)  	}  	if (!done) { -		INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); +		dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);  		/* Put a call to NCR5380_main() on the queue... 
*/  		queue_main();  	} @@ -1396,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  	unsigned long flags;  	hostdata->restart_select = 0; -	NCR_PRINT(NDEBUG_ARBITRATION); -	ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, +	NCR5380_dprint(NDEBUG_ARBITRATION, instance); +	dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,  		   instance->this_id);  	/* @@ -1442,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  		;  #endif -	ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); +	dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);  	if (hostdata->connected) {  		NCR5380_write(MODE_REG, MR_BASE); @@ -1463,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  	    (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||  	    hostdata->connected) {  		NCR5380_write(MODE_REG, MR_BASE); -		ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", +		dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",  			   HOSTNO);  		return -1;  	} @@ -1478,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  	    hostdata->connected) {  		NCR5380_write(MODE_REG, MR_BASE);  		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -		ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", +		dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",  			   HOSTNO);  		return -1;  	} @@ -1501,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  		return -1;  	} -	ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); +	dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);  	/*  	 * Now that we have won arbitration, start Selection process, asserting @@ -1561,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  	udelay(1); -	SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); +	dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);  	/*  	 * The SCSI specification calls for a 250 ms timeout for the actual @@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  			printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);  			if (hostdata->restart_select)  				printk(KERN_NOTICE "\trestart select\n"); -			NCR_PRINT(NDEBUG_ANY); +			NCR5380_dprint(NDEBUG_ANY, instance);  			NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);  			return -1;  		} @@ -1630,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  #endif  		cmd->scsi_done(cmd);  		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); -		SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); +		dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);  		NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);  		return 0;  	} @@ -1656,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  	while (!(NCR5380_read(STATUS_REG) & SR_REQ))  		; -	SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", +	dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",  		   HOSTNO, cmd->device->id);  	tmp[0] = IDENTIFY(1, cmd->device->lun); @@ -1676,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)  	data 
= tmp;  	phase = PHASE_MSGOUT;  	NCR5380_transfer_pio(instance, &phase, &len, &data); -	SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); +	dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);  	/* XXX need to handle errors here */  	hostdata->connected = cmd;  #ifndef SUPPORT_TAGS @@ -1737,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,  		while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))  			; -		HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); +		dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);  		/* Check for phase mismatch */  		if ((tmp & PHASE_MASK) != p) { -			PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); -			NCR_PRINT_PHASE(NDEBUG_PIO); +			dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); +			NCR5380_dprint_phase(NDEBUG_PIO, instance);  			break;  		} @@ -1764,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,  		if (!(p & SR_IO)) {  			if (!((p & SR_MSG) && c > 1)) {  				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); -				NCR_PRINT(NDEBUG_PIO); +				NCR5380_dprint(NDEBUG_PIO, instance);  				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |  					      ICR_ASSERT_DATA | ICR_ASSERT_ACK);  			} else {  				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |  					      ICR_ASSERT_DATA | ICR_ASSERT_ATN); -				NCR_PRINT(NDEBUG_PIO); +				NCR5380_dprint(NDEBUG_PIO, instance);  				NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |  					      ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);  			}  		} else { -			NCR_PRINT(NDEBUG_PIO); +			NCR5380_dprint(NDEBUG_PIO, instance);  			NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);  		}  		while (NCR5380_read(STATUS_REG) & SR_REQ)  			; -		HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); +		dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);  		/*  		 * We have several special cases to consider during REQ/ACK handshaking : @@ -1803,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,  		}  	} while (--c); -	PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); +	dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);  	*count = c;  	*data = d; @@ -1917,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,  	if (atari_read_overruns && (p & SR_IO))  		c -= atari_read_overruns; -	DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", +	dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",  		   HOSTNO, (p & SR_IO) ? "reading" : "writing",  		   c, (p & SR_IO) ? "to" : "from", d); @@ -1997,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  			phase = (tmp & PHASE_MASK);  			if (phase != old_phase) {  				old_phase = phase; -				NCR_PRINT_PHASE(NDEBUG_INFORMATION); +				NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);  			}  			if (sink && (phase != PHASE_MSGOUT)) { @@ -2039,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  					 * they are at contiguous physical addresses.  					 
*/  					merge_contiguous_buffers(cmd); -					INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", +					dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",  						   HOSTNO, cmd->SCp.this_residual,  						   cmd->SCp.buffers_residual);  				} @@ -2123,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  					/* Accept message by clearing ACK */  					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -					LNK_PRINTK("scsi%d: target %d lun %d linked command " +					dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "  						   "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);  					/* Enable reselect interrupts */ @@ -2148,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  					 * and don't free it! */  					cmd->next_link->tag = cmd->tag;  					cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); -					LNK_PRINTK("scsi%d: target %d lun %d linked request " +					dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "  						   "done, calling scsi_done().\n",  						   HOSTNO, cmd->device->id, cmd->device->lun);  #ifdef NCR5380_STATS @@ -2165,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  					/* ++guenther: possible race with Falcon locking */  					falcon_dont_release++;  					hostdata->connected = NULL; -					QU_PRINTK("scsi%d: command for target %d, lun %d " +					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "  						  "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);  #ifdef SUPPORT_TAGS  					cmd_free_tag(cmd); @@ -2179,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  						/* ++Andreas: the mid level code knows about  						   QUEUE_FULL now. 
*/  						TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; -						TAG_PRINTK("scsi%d: target %d lun %d returned " +						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "  							   "QUEUE_FULL after %d commands\n",  							   HOSTNO, cmd->device->id, cmd->device->lun,  							   ta->nr_allocated); @@ -2224,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  					    (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {  						scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); -						ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO); +						dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);  						local_irq_save(flags);  						LIST(cmd,hostdata->issue_queue);  						SET_NEXT(cmd, hostdata->issue_queue);  						hostdata->issue_queue = (Scsi_Cmnd *) cmd;  						local_irq_restore(flags); -						QU_PRINTK("scsi%d: REQUEST SENSE added to head of " +						dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "  							  "issue queue\n", H_NO(cmd));  					} else  #endif /* def AUTOSENSE */ @@ -2277,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  						cmd->device->tagged_supported = 0;  						hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);  						cmd->tag = TAG_NONE; -						TAG_PRINTK("scsi%d: target %d lun %d rejected " +						dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "  							   "QUEUE_TAG message; tagged queuing "  							   "disabled\n",  							   HOSTNO, cmd->device->id, cmd->device->lun); @@ -2294,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  					hostdata->connected = NULL;  					hostdata->disconnected_queue = cmd;  					local_irq_restore(flags); -					QU_PRINTK("scsi%d: command for target %d lun %d was " +					dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "  						  "moved from connected to the "  						  "disconnected_queue\n", HOSTNO,  						  cmd->device->id, cmd->device->lun); @@ -2344,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  					/* Accept first byte by clearing ACK */  					NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -					EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); +					dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);  					len = 2;  					data = extended_msg + 1;  					phase = PHASE_MSGIN;  					NCR5380_transfer_pio(instance, &phase, &len, &data); -					EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, +					dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,  						   (int)extended_msg[1], (int)extended_msg[2]);  					if (!len && extended_msg[1] <= @@ -2362,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  						phase = PHASE_MSGIN;  						NCR5380_transfer_pio(instance, &phase, &len, &data); -						EXT_PRINTK("scsi%d: message received, residual %d\n", +						dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",  							   HOSTNO, len);  						switch (extended_msg[2]) { @@ -2451,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)  				break;  			default:  				printk("scsi%d: unknown phase\n", HOSTNO); -				NCR_PRINT(NDEBUG_ANY); +				NCR5380_dprint(NDEBUG_ANY, instance);  			} /* switch(phase) */  		} /* if (tmp * SR_REQ) */  	} /* while (1) */ @@ -2493,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)  	target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & 
~(hostdata->id_mask); -	RSL_PRINTK("scsi%d: reselect\n", HOSTNO); +	dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);  	/*  	 * At this point, we have detected that our SCSI ID is on the bus, @@ -2544,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)  		if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&  		    msg[1] == SIMPLE_QUEUE_TAG)  			tag = msg[2]; -		TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " +		dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "  			   "reselection\n", HOSTNO, target_mask, lun, tag);  	}  #endif @@ -2598,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)  	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);  	hostdata->connected = tmp; -	RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", +	dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",  		   HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);  	falcon_dont_release--;  } @@ -2640,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  		printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",  		       HOSTNO); -	ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, +	dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,  		    NCR5380_read(BUS_AND_STATUS_REG),  		    NCR5380_read(STATUS_REG)); @@ -2653,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  	if (hostdata->connected == cmd) { -		ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); +		dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);  		/*  		 * We should perform BSY checking, and make sure we haven't slipped  		 * into BUS FREE. @@ -2683,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  			local_irq_restore(flags);  			cmd->scsi_done(cmd);  			falcon_release_lock_if_possible(hostdata); -			return SCSI_ABORT_SUCCESS; +			return SUCCESS;  		} else {  /*			local_irq_restore(flags); */  			printk("scsi%d: abort of connected command failed!\n", HOSTNO); -			return SCSI_ABORT_ERROR; +			return FAILED;  		}  	}  #endif @@ -2705,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  			SET_NEXT(tmp, NULL);  			tmp->result = DID_ABORT << 16;  			local_irq_restore(flags); -			ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", +			dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",  				    HOSTNO);  			/* Tagged queuing note: no tag to free here, hasn't been assigned  			 * yet... 
*/  			tmp->scsi_done(tmp);  			falcon_release_lock_if_possible(hostdata); -			return SCSI_ABORT_SUCCESS; +			return SUCCESS;  		}  	} @@ -2728,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  	if (hostdata->connected) {  		local_irq_restore(flags); -		ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); -		return SCSI_ABORT_SNOOZE; +		dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); +		return FAILED;  	}  	/* @@ -2761,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  	     tmp = NEXT(tmp)) {  		if (cmd == tmp) {  			local_irq_restore(flags); -			ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); +			dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);  			if (NCR5380_select(instance, cmd, (int)cmd->tag)) -				return SCSI_ABORT_BUSY; +				return FAILED; -			ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); +			dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);  			do_abort(instance); @@ -2791,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  					local_irq_restore(flags);  					tmp->scsi_done(tmp);  					falcon_release_lock_if_possible(hostdata); -					return SCSI_ABORT_SUCCESS; +					return SUCCESS;  				}  			}  		} @@ -2816,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)  	 */  	falcon_release_lock_if_possible(hostdata); -	return SCSI_ABORT_NOT_RUNNING; +	return FAILED;  } @@ -2825,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)   *   * Purpose : reset the SCSI bus.   * - * Returns : SCSI_RESET_WAKEUP + * Returns : SUCCESS or FAILURE   *   */ @@ -2834,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)  	SETUP_HOSTDATA(cmd->device->host);  	int i;  	unsigned long flags; -#if 1 +#if defined(RESET_RUN_DONE)  	Scsi_Cmnd *connected, *disconnected_queue;  #endif @@ -2859,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)  	 * through anymore ... */  	(void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); -#if 1	/* XXX Should now be done by midlevel code, but it's broken XXX */ +	/* MSch 20140115 - looking at the generic NCR5380 driver, all of this +	 * should go. +	 * Catch-22: if we don't clear all queues, the SCSI driver lock will +	 * not be reset by atari_scsi_reset()! +	 */ + +#if defined(RESET_RUN_DONE) +	/* XXX Should now be done by midlevel code, but it's broken XXX */  	/* XXX see below                                            XXX */  	/* MSch: old-style reset: actually abort all command processing here */ @@ -2890,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)  	 */  	if ((cmd = connected)) { -		ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); +		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));  		cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);  		cmd->scsi_done(cmd);  	} @@ -2902,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)  		cmd->scsi_done(cmd);  	}  	if (i > 0) -		ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); +		dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);  	/* The Falcon lock should be released after a reset...  	 
*/ @@ -2915,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)  	 * the midlevel code that the reset was SUCCESSFUL, and there is no  	 * need to 'wake up' the commands by a request_sense  	 */ -	return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; +	return SUCCESS;  #else /* 1 */  	/* MSch: new-style reset handling: let the mid-level do what it can */ @@ -2942,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)  	 */  	if (hostdata->issue_queue) -		ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); +		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));  	if (hostdata->connected) -		ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); +		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));  	if (hostdata->disconnected_queue) -		ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); +		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));  	local_irq_save(flags);  	hostdata->issue_queue = NULL; @@ -2963,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)  	local_irq_restore(flags);  	/* we did no complete reset of all commands, so a wakeup is required */ -	return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; +	return SUCCESS;  #endif /* 1 */  } diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index a3e6c8a3ff0..b522134528d 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -67,12 +67,6 @@  #include <linux/module.h> -#define NDEBUG (0) - -#define NDEBUG_ABORT		0x00100000 -#define NDEBUG_TAGS		0x00200000 -#define NDEBUG_MERGING		0x00400000 -  #define AUTOSENSE  /* For the Atari version, use only polled IO or REAL_DMA */  #define	REAL_DMA @@ -90,6 +84,7 @@  #include <linux/init.h>  #include <linux/nvram.h>  #include <linux/bitops.h> +#include <linux/wait.h>  #include <asm/setup.h>  #include <asm/atarihw.h> @@ -313,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)  	dma_stat = tt_scsi_dma.dma_ctrl; -	INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n", +	dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n",  		   atari_scsi_host->host_no, dma_stat & 0xff);  	/* Look if it was the DMA that has interrupted: First possibility @@ -339,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)  	if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {  		atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); -		DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", +		dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",  			   atari_dma_residual);  		if ((signed int)atari_dma_residual < 0) @@ -370,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)  			 * other command.  These shouldn't disconnect anyway.  			 
*/  			if (atari_dma_residual & 0x1ff) { -				DMA_PRINTK("SCSI DMA: DMA bug corrected, " +				dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, "  					   "difference %ld bytes\n",  					   512 - (atari_dma_residual & 0x1ff));  				atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff; @@ -437,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)  			       "ST-DMA fifo\n", transferred & 15);  		atari_dma_residual = HOSTDATA_DMALEN - transferred; -		DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", +		dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",  			   atari_dma_residual);  	} else  		atari_dma_residual = 0; @@ -473,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void)  		/* there are 'nr' bytes left for the last long address  		   before the DMA pointer */  		phys_dst ^= nr; -		DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx", +		dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",  			   nr, phys_dst);  		/* The content of the DMA pointer is a physical address!  */  		dst = phys_to_virt(phys_dst); -		DMA_PRINTK(" = virt addr %p\n", dst); +		dprintk(NDEBUG_DMA, " = virt addr %p\n", dst);  		for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)  			*dst++ = *src++;  	} @@ -549,8 +544,10 @@ static void falcon_get_lock(void)  	local_irq_save(flags); -	while (!in_irq() && falcon_got_lock && stdma_others_waiting()) -		sleep_on(&falcon_fairness_wait); +	wait_event_cmd(falcon_fairness_wait, +		in_interrupt() || !falcon_got_lock || !stdma_others_waiting(), +		local_irq_restore(flags), +		local_irq_save(flags));  	while (!falcon_got_lock) {  		if (in_irq()) @@ -562,7 +559,10 @@ static void falcon_get_lock(void)  			falcon_trying_lock = 0;  			wake_up(&falcon_try_wait);  		} else { -			sleep_on(&falcon_try_wait); +			wait_event_cmd(falcon_try_wait, +				falcon_got_lock && !falcon_trying_lock, +				local_irq_restore(flags), +				local_irq_save(flags));  		}  	} @@ -633,7 +633,7 @@ static int __init atari_scsi_detect(struct scsi_host_template *host)  					"double buffer\n");  			return 0;  		} -		atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer); +		atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer);  		atari_dma_orig_addr = 0;  	}  #endif @@ -821,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)  	} else {  		atari_turnon_irq(IRQ_MFP_FSCSI);  	} -	if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) +	if (rv == SUCCESS)  		falcon_release_lock_if_possible(hostdata);  	return rv; @@ -877,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,  {  	unsigned long addr = virt_to_phys(data); -	DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " +	dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "  		   "dir = %d\n", instance->host_no, data, addr, count, dir);  	if (!IS_A_TT() && !STRAM_ADDR(addr)) { @@ -1057,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,  		possible_len = limit;  	if (possible_len != wanted_len) -		DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " +		dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "  			   "instead of %ld\n", possible_len, wanted_len);  	return possible_len; diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h index 11c624bb122..3299d91d733 100644 --- a/drivers/scsi/atari_scsi.h +++ b/drivers/scsi/atari_scsi.h @@ -54,125 +54,6 @@  #define	NCR5380_dma_xfer_len(i,cmd,phase) \  	
atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) -/* former generic SCSI error handling stuff */ - -#define SCSI_ABORT_SNOOZE 0 -#define SCSI_ABORT_SUCCESS 1 -#define SCSI_ABORT_PENDING 2 -#define SCSI_ABORT_BUSY 3 -#define SCSI_ABORT_NOT_RUNNING 4 -#define SCSI_ABORT_ERROR 5 - -#define SCSI_RESET_SNOOZE 0 -#define SCSI_RESET_PUNT 1 -#define SCSI_RESET_SUCCESS 2 -#define SCSI_RESET_PENDING 3 -#define SCSI_RESET_WAKEUP 4 -#define SCSI_RESET_NOT_RUNNING 5 -#define SCSI_RESET_ERROR 6 - -#define SCSI_RESET_SYNCHRONOUS		0x01 -#define SCSI_RESET_ASYNCHRONOUS		0x02 -#define SCSI_RESET_SUGGEST_BUS_RESET	0x04 -#define SCSI_RESET_SUGGEST_HOST_RESET	0x08 - -#define SCSI_RESET_BUS_RESET 0x100 -#define SCSI_RESET_HOST_RESET 0x200 -#define SCSI_RESET_ACTION   0xff - -/* Debugging printk definitions: - * - *  ARB  -> arbitration - *  ASEN -> auto-sense - *  DMA  -> DMA - *  HSH  -> PIO handshake - *  INF  -> information transfer - *  INI  -> initialization - *  INT  -> interrupt - *  LNK  -> linked commands - *  MAIN -> NCR5380_main() control flow - *  NDAT -> no data-out phase - *  NWR  -> no write commands - *  PIO  -> PIO transfers - *  PDMA -> pseudo DMA (unused on Atari) - *  QU   -> queues - *  RSL  -> reselections - *  SEL  -> selections - *  USL  -> usleep cpde (unused on Atari) - *  LBS  -> last byte sent (unused on Atari) - *  RSS  -> restarting of selections - *  EXT  -> extended messages - *  ABRT -> aborting and resetting - *  TAG  -> queue tag handling - *  MER  -> merging of consec. buffers - * - */ - -#define dprint(flg, format...)			\ -({						\ -	if (NDEBUG & (flg))			\ -		printk(KERN_DEBUG format);	\ -}) - -#define ARB_PRINTK(format, args...) \ -	dprint(NDEBUG_ARBITRATION, format , ## args) -#define ASEN_PRINTK(format, args...) \ -	dprint(NDEBUG_AUTOSENSE, format , ## args) -#define DMA_PRINTK(format, args...) \ -	dprint(NDEBUG_DMA, format , ## args) -#define HSH_PRINTK(format, args...) \ -	dprint(NDEBUG_HANDSHAKE, format , ## args) -#define INF_PRINTK(format, args...) \ -	dprint(NDEBUG_INFORMATION, format , ## args) -#define INI_PRINTK(format, args...) \ -	dprint(NDEBUG_INIT, format , ## args) -#define INT_PRINTK(format, args...) \ -	dprint(NDEBUG_INTR, format , ## args) -#define LNK_PRINTK(format, args...) \ -	dprint(NDEBUG_LINKED, format , ## args) -#define MAIN_PRINTK(format, args...) \ -	dprint(NDEBUG_MAIN, format , ## args) -#define NDAT_PRINTK(format, args...) \ -	dprint(NDEBUG_NO_DATAOUT, format , ## args) -#define NWR_PRINTK(format, args...) \ -	dprint(NDEBUG_NO_WRITE, format , ## args) -#define PIO_PRINTK(format, args...) \ -	dprint(NDEBUG_PIO, format , ## args) -#define PDMA_PRINTK(format, args...) \ -	dprint(NDEBUG_PSEUDO_DMA, format , ## args) -#define QU_PRINTK(format, args...) \ -	dprint(NDEBUG_QUEUES, format , ## args) -#define RSL_PRINTK(format, args...) \ -	dprint(NDEBUG_RESELECTION, format , ## args) -#define SEL_PRINTK(format, args...) \ -	dprint(NDEBUG_SELECTION, format , ## args) -#define USL_PRINTK(format, args...) \ -	dprint(NDEBUG_USLEEP, format , ## args) -#define LBS_PRINTK(format, args...) \ -	dprint(NDEBUG_LAST_BYTE_SENT, format , ## args) -#define RSS_PRINTK(format, args...) \ -	dprint(NDEBUG_RESTART_SELECT, format , ## args) -#define EXT_PRINTK(format, args...) \ -	dprint(NDEBUG_EXTENDED, format , ## args) -#define ABRT_PRINTK(format, args...) \ -	dprint(NDEBUG_ABORT, format , ## args) -#define TAG_PRINTK(format, args...) \ -	dprint(NDEBUG_TAGS, format , ## args) -#define MER_PRINTK(format, args...) 
\ -	dprint(NDEBUG_MERGING, format , ## args) - -/* conditional macros for NCR5380_print_{,phase,status} */ - -#define NCR_PRINT(mask)	\ -	((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0) - -#define NCR_PRINT_PHASE(mask) \ -	((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0) - -#define NCR_PRINT_STATUS(mask) \ -	((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0) - -  #endif /* ndef ASM */  #endif /* ATARI_SCSI_H */ diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index 15a629d8ed0..a795d81ef87 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -3144,8 +3144,6 @@ static void atp870u_remove (struct pci_dev *pdev)  	atp870u_free_tables(pshost);  	printk(KERN_INFO "scsi_host_put : %p\n",pshost);  	scsi_host_put(pshost); -	printk(KERN_INFO "pci_set_drvdata : %p\n",pdev); -	pci_set_drvdata(pdev, NULL);	  }  MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h index 777e7c0bbb4..860f527d8f2 100644 --- a/drivers/scsi/be2iscsi/be.h +++ b/drivers/scsi/be2iscsi/be.h @@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q)  /*ISCSI */ +struct be_aic_obj {		/* Adaptive interrupt coalescing (AIC) info */ +	bool enable; +	u32 min_eqd;		/* in usecs */ +	u32 max_eqd;		/* in usecs */ +	u32 prev_eqd;		/* in usecs */ +	u32 et_eqd;		/* configured val when aic is off */ +	ulong jiffs; +	u64 eq_prev;		/* Used to calculate eqe */ +}; +  struct be_eq_obj {  	bool todo_mcc_cq;  	bool todo_cq; +	u32 cq_count;  	struct be_queue_info q;  	struct beiscsi_hba *phba;  	struct be_queue_info *cq; @@ -98,6 +109,14 @@ struct be_mcc_obj {  	struct be_queue_info cq;  }; +struct beiscsi_mcc_tag_state { +#define MCC_TAG_STATE_COMPLETED 0x00 +#define MCC_TAG_STATE_RUNNING   0x01 +#define MCC_TAG_STATE_TIMEOUT   0x02 +	uint8_t tag_state; +	struct be_dma_mem tag_mem_state; +}; +  struct be_ctrl_info {  	u8 __iomem *csr;  	u8 __iomem *db;		/* Door Bell */ @@ -122,13 +141,16 @@ struct be_ctrl_info {  	unsigned short mcc_alloc_index;  	unsigned short mcc_free_index;  	unsigned int mcc_tag_available; + +	struct beiscsi_mcc_tag_state ptag_state[MAX_MCC_CMD + 1];  };  #include "be_cmds.h"  #define PAGE_SHIFT_4K 12  #define PAGE_SIZE_4K (1 << PAGE_SHIFT_4K) -#define mcc_timeout		120000 /* 5s timeout */ +#define mcc_timeout		120000 /* 12s timeout */ +#define BEISCSI_LOGOUT_SYNC_DELAY	250  /* Returns number of pages spanned by the data starting at the given addr */  #define PAGES_4K_SPANNED(_address, size)				\ diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index e66aa7c11a8..1432ed5e9fc 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -17,9 +17,9 @@  #include <scsi/iscsi_proto.h> +#include "be_main.h"  #include "be.h"  #include "be_mgmt.h" -#include "be_main.h"  int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)  { @@ -138,7 +138,7 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)   * @phba: Driver private structure   * @tag: Tag for the MBX Command   * @wrb: the WRB used for the MBX Command - * @cmd_hdr: IOCTL Hdr for the MBX Cmd + * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd   *   * Waits for MBX completion with the passed TAG.   
* @@ -148,18 +148,25 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)   **/  int beiscsi_mccq_compl(struct beiscsi_hba *phba,  		uint32_t tag, struct be_mcc_wrb **wrb, -		void *cmd_hdr) +		struct be_dma_mem *mbx_cmd_mem)  {  	int rc = 0;  	uint32_t mcc_tag_response;  	uint16_t status = 0, addl_status = 0, wrb_num = 0;  	struct be_mcc_wrb *temp_wrb; -	struct be_cmd_req_hdr *ioctl_hdr; -	struct be_cmd_resp_hdr *ioctl_resp_hdr; +	struct be_cmd_req_hdr *mbx_hdr; +	struct be_cmd_resp_hdr *mbx_resp_hdr;  	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; -	if (beiscsi_error(phba)) -		return -EIO; +	if (beiscsi_error(phba)) { +		free_mcc_tag(&phba->ctrl, tag); +		return -EPERM; +	} + +	/* Set MBX Tag state to Active */ +	spin_lock(&phba->ctrl.mbox_lock); +	phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_RUNNING; +	spin_unlock(&phba->ctrl.mbox_lock);  	/* wait for the mccq completion */  	rc = wait_event_interruptible_timeout( @@ -169,49 +176,76 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba,  				BEISCSI_HOST_MBX_TIMEOUT));  	if (rc <= 0) { +		struct be_dma_mem *tag_mem; +		/* Set MBX Tag state to timeout */ +		spin_lock(&phba->ctrl.mbox_lock); +		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_TIMEOUT; +		spin_unlock(&phba->ctrl.mbox_lock); + +		/* Store resource addr to be freed later */ +		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state; +		if (mbx_cmd_mem) { +			tag_mem->size = mbx_cmd_mem->size; +			tag_mem->va = mbx_cmd_mem->va; +			tag_mem->dma = mbx_cmd_mem->dma; +		} else +			tag_mem->size = 0; +  		beiscsi_log(phba, KERN_ERR,  			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |  			    BEISCSI_LOG_CONFIG,  			    "BC_%d : MBX Cmd Completion timed out\n"); -		rc = -EAGAIN; -		goto release_mcc_tag; -	} else +		return -EBUSY; +	} else {  		rc = 0; +		/* Set MBX Tag state to completed */ +		spin_lock(&phba->ctrl.mbox_lock); +		phba->ctrl.ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED; +		spin_unlock(&phba->ctrl.mbox_lock); +	}  	mcc_tag_response = phba->ctrl.mcc_numtag[tag];  	status = (mcc_tag_response & CQE_STATUS_MASK);  	addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>  			CQE_STATUS_ADDL_SHIFT); -	if (cmd_hdr) { -		ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr; +	if (mbx_cmd_mem) { +		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;  	} else {  		wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>  			   CQE_STATUS_WRB_SHIFT;  		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num); -		ioctl_hdr = embedded_payload(temp_wrb); +		mbx_hdr = embedded_payload(temp_wrb);  		if (wrb)  			*wrb = temp_wrb;  	}  	if (status || addl_status) { -		beiscsi_log(phba, KERN_ERR, +		beiscsi_log(phba, KERN_WARNING,  			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |  			    BEISCSI_LOG_CONFIG,  			    "BC_%d : MBX Cmd Failed for "  			    "Subsys : %d Opcode : %d with "  			    "Status : %d and Extd_Status : %d\n", -			    ioctl_hdr->subsystem, -			    ioctl_hdr->opcode, +			    mbx_hdr->subsystem, +			    mbx_hdr->opcode,  			    status, addl_status);  		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { -			ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; -			if (ioctl_resp_hdr->response_length) -				goto release_mcc_tag; +			mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr; +			beiscsi_log(phba, KERN_WARNING, +				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH | +				    BEISCSI_LOG_CONFIG, +				    "BC_%d : Insufficent Buffer Error " +				    "Resp_Len : %d Actual_Resp_Len : %d\n", +				    mbx_resp_hdr->response_length, +				    mbx_resp_hdr->actual_resp_len); + +			
rc = -EAGAIN; +			goto release_mcc_tag;  		} -		rc = -EAGAIN; +		rc = -EIO;  	}  release_mcc_tag: @@ -305,6 +339,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,  int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,  				    struct be_mcc_compl *compl)  { +	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);  	u16 compl_status, extd_status;  	unsigned short tag; @@ -324,7 +359,32 @@ int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,  	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);  	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;  	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF); -	wake_up_interruptible(&ctrl->mcc_wait[tag]); + +	if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_RUNNING) { +		wake_up_interruptible(&ctrl->mcc_wait[tag]); +	} else if (ctrl->ptag_state[tag].tag_state == MCC_TAG_STATE_TIMEOUT) { +		struct be_dma_mem *tag_mem; +		tag_mem = &ctrl->ptag_state[tag].tag_mem_state; + +		beiscsi_log(phba, KERN_WARNING, +			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | +			    BEISCSI_LOG_CONFIG, +			    "BC_%d : MBX Completion for timeout Command " +			    "from FW\n"); +		/* Check if memory needs to be freed */ +		if (tag_mem->size) +			pci_free_consistent(ctrl->pdev, tag_mem->size, +					    tag_mem->va, tag_mem->dma); + +		/* Change tag state */ +		spin_lock(&phba->ctrl.mbox_lock); +		ctrl->ptag_state[tag].tag_state = MCC_TAG_STATE_COMPLETED; +		spin_unlock(&phba->ctrl.mbox_lock); + +		/* Free MCC Tag */ +		free_mcc_tag(ctrl, tag); +	} +  	return 0;  } @@ -340,8 +400,23 @@ static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)  	return NULL;  } -static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) +/** + * be2iscsi_fail_session(): Closing session with appropriate error + * @cls_session: ptr to session + * + * Depending on adapter state appropriate error flag is passed. 
+ **/ +void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)  { +	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); +	struct beiscsi_hba *phba = iscsi_host_priv(shost); +	uint32_t iscsi_err_flag; + +	if (phba->state & BE_ADAPTER_STATE_SHUTDOWN) +		iscsi_err_flag = ISCSI_ERR_INVALID_HOST; +	else +		iscsi_err_flag = ISCSI_ERR_CONN_FAILED; +  	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);  } @@ -363,7 +438,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,  	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||  		    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&  		     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { -		phba->state = BE_ADAPTER_UP; +		phba->state = BE_ADAPTER_LINK_UP;  		beiscsi_log(phba, KERN_ERR,  			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, @@ -372,18 +447,6 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,  	}  } -static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm, -		       u16 num_popped) -{ -	u32 val = 0; -	val |= qid & DB_CQ_RING_ID_MASK; -	if (arm) -		val |= 1 << DB_CQ_REARM_SHIFT; -	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT; -	iowrite32(val, phba->db_va + DB_CQ_OFFSET); -} - -  int beiscsi_process_mcc(struct beiscsi_hba *phba)  {  	struct be_mcc_compl *compl; @@ -414,7 +477,7 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)  	}  	if (num) -		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num); +		hwi_ring_cq_db(phba, phba->ctrl.mcc_obj.cq.id, num, 1, 0);  	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);  	return status; @@ -486,33 +549,47 @@ int be_mcc_notify_wait(struct beiscsi_hba *phba)   **/  static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)  { +#define BEISCSI_MBX_RDY_BIT_TIMEOUT	4000	/* 4sec */  	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;  	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); -	uint32_t wait = 0; +	unsigned long timeout; +	bool read_flag = false; +	int ret = 0, i;  	u32 ready; +	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(rdybit_check_q); -	do { +	if (beiscsi_error(phba)) +		return -EIO; -		if (beiscsi_error(phba)) -			return -EIO; +	timeout = jiffies + (HZ * 110); -		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; -		if (ready) -			break; +	do { +		for (i = 0; i < BEISCSI_MBX_RDY_BIT_TIMEOUT; i++) { +			ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK; +			if (ready) { +				read_flag = true; +				break; +			} +			mdelay(1); +		} -		if (wait > BEISCSI_HOST_MBX_TIMEOUT) { -			beiscsi_log(phba, KERN_ERR, -				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, -				    "BC_%d : FW Timed Out\n"); +		if (!read_flag) { +			wait_event_timeout(rdybit_check_q, +					  (read_flag != true), +					   HZ * 5); +		} +	} while ((time_before(jiffies, timeout)) && !read_flag); + +	if (!read_flag) { +		beiscsi_log(phba, KERN_ERR, +			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, +			    "BC_%d : FW Timed Out\n");  			phba->fw_timeout = true;  			beiscsi_ue_detect(phba); -			return -EBUSY; -		} +			ret = -EBUSY; +	} -		mdelay(1); -		wait++; -	} while (true); -	return 0; +	return ret;  }  /* @@ -699,7 +776,7 @@ struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)  	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;  	struct be_mcc_wrb *wrb; -	BUG_ON(atomic_read(&mccq->used) >= mccq->len); +	WARN_ON(atomic_read(&mccq->used) >= mccq->len);  	wrb = queue_head_node(mccq);  	memset(wrb, 0, sizeof(*wrb));  	wrb->tag0 = (mccq->head & 0x000000FF) << 16; @@ -1009,10 +1086,29 @@ int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct 
be_queue_info *q,  	return status;  } +/** + * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter + * @ctrl: ptr to ctrl_info + * @cq: Completion Queue + * @dq: Default Queue + * @lenght: ring size + * @entry_size: size of each entry in DEFQ + * @is_header: Header or Data DEFQ + * @ulp_num: Bind to which ULP + * + * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted + * on this queue by the FW + * + * return + *	Success: 0 + *	Failure: Non-Zero Value + * + **/  int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,  				    struct be_queue_info *cq,  				    struct be_queue_info *dq, int length, -				    int entry_size) +				    int entry_size, uint8_t is_header, +				    uint8_t ulp_num)  {  	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);  	struct be_defq_create_req *req = embedded_payload(wrb); @@ -1030,6 +1126,11 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,  			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));  	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); +	if (phba->fw_config.dual_ulp_aware) { +		req->ulp_num = ulp_num; +		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); +		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); +	}  	if (is_chip_be2_be3r(phba)) {  		AMAP_SET_BITS(struct amap_be_default_pdu_context, @@ -1067,22 +1168,53 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,  	status = be_mbox_notify(ctrl);  	if (!status) { +		struct be_ring *defq_ring;  		struct be_defq_create_resp *resp = embedded_payload(wrb);  		dq->id = le16_to_cpu(resp->id);  		dq->created = true; +		if (is_header) +			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num]; +		else +			defq_ring = &phba->phwi_ctrlr-> +				    default_pdu_data[ulp_num]; + +		defq_ring->id = dq->id; + +		if (!phba->fw_config.dual_ulp_aware) { +			defq_ring->ulp_num = BEISCSI_ULP0; +			defq_ring->doorbell_offset = DB_RXULP0_OFFSET; +		} else { +			defq_ring->ulp_num = resp->ulp_num; +			defq_ring->doorbell_offset = resp->doorbell_offset; +		}  	}  	spin_unlock(&ctrl->mbox_lock);  	return status;  } -int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, -		       struct be_queue_info *wrbq) +/** + * be_cmd_wrbq_create()- Create WRBQ + * @ctrl: ptr to ctrl_info + * @q_mem: memory details for the queue + * @wrbq: queue info + * @pwrb_context: ptr to wrb_context + * @ulp_num: ULP on which the WRBQ is to be created + * + * Create WRBQ on the passed ULP_NUM. 
+ * + **/ +int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, +			struct be_dma_mem *q_mem, +			struct be_queue_info *wrbq, +			struct hwi_wrb_context *pwrb_context, +			uint8_t ulp_num)  {  	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);  	struct be_wrbq_create_req *req = embedded_payload(wrb);  	struct be_wrbq_create_resp *resp = embedded_payload(wrb); +	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);  	int status;  	spin_lock(&ctrl->mbox_lock); @@ -1093,17 +1225,78 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,  	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,  		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));  	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); + +	if (phba->fw_config.dual_ulp_aware) { +		req->ulp_num = ulp_num; +		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); +		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); +	} +  	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);  	status = be_mbox_notify(ctrl);  	if (!status) {  		wrbq->id = le16_to_cpu(resp->cid);  		wrbq->created = true; + +		pwrb_context->cid = wrbq->id; +		if (!phba->fw_config.dual_ulp_aware) { +			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET; +			pwrb_context->ulp_num = BEISCSI_ULP0; +		} else { +			pwrb_context->ulp_num = resp->ulp_num; +			pwrb_context->doorbell_offset = resp->doorbell_offset; +		}  	}  	spin_unlock(&ctrl->mbox_lock);  	return status;  } +int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, +				    struct be_dma_mem *q_mem) +{ +	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); +	struct be_post_template_pages_req *req = embedded_payload(wrb); +	int status; + +	spin_lock(&ctrl->mbox_lock); + +	memset(wrb, 0, sizeof(*wrb)); +	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); +	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, +			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS, +			   sizeof(*req)); + +	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); +	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; +	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); + +	status = be_mbox_notify(ctrl); +	spin_unlock(&ctrl->mbox_lock); +	return status; +} + +int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl) +{ +	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); +	struct be_remove_template_pages_req *req = embedded_payload(wrb); +	int status; + +	spin_lock(&ctrl->mbox_lock); + +	memset(wrb, 0, sizeof(*wrb)); +	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); +	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, +			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS, +			   sizeof(*req)); + +	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; + +	status = be_mbox_notify(ctrl); +	spin_unlock(&ctrl->mbox_lock); +	return status; +} +  int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,  				struct be_dma_mem *q_mem,  				u32 page_offset, u32 num_pages) diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 99073086dfe..cc7405c0eca 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -40,6 +40,7 @@ struct be_mcc_wrb {  	u32 tag1;		/* dword 3 */  	u32 rsvd;		/* dword 4 */  	union { +#define EMBED_MBX_MAX_PAYLOAD_SIZE  220  		u8 embedded_payload[236];	/* used by embedded cmds */  		struct be_sge sgl[19];	/* used by non-embedded cmds */  	} payload; @@ -70,6 +71,7 @@ struct be_mcc_wrb {  #define BEISCSI_FW_MBX_TIMEOUT	100  /* MBOX Command VER */ +#define MBX_CMD_VER1	0x01  #define MBX_CMD_VER2	0x02  
struct be_mcc_compl { @@ -102,7 +104,7 @@ struct be_mcc_compl {  /********** MCC door bell ************/  #define DB_MCCQ_OFFSET 0x140 -#define DB_MCCQ_RING_ID_MASK 0x7FF		/* bits 0 - 10 */ +#define DB_MCCQ_RING_ID_MASK 0xFFFF		/* bits 0 - 15 */  /* Number of entries posted */  #define DB_MCCQ_NUM_POSTED_SHIFT 16		/* bits 16 - 29 */ @@ -162,6 +164,8 @@ struct be_mcc_mailbox {  #define OPCODE_COMMON_CQ_CREATE				12  #define OPCODE_COMMON_EQ_CREATE				13  #define OPCODE_COMMON_MCC_CREATE			21 +#define OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS	24 +#define OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS	25  #define OPCODE_COMMON_GET_CNTL_ATTRIBUTES		32  #define OPCODE_COMMON_GET_FW_VERSION			35  #define OPCODE_COMMON_MODIFY_EQ_DELAY			41 @@ -217,6 +221,10 @@ struct phys_addr {  	u32 hi;  }; +struct virt_addr { +	u32 lo; +	u32 hi; +};  /**************************   * BE Command definitions *   **************************/ @@ -264,6 +272,12 @@ struct be_cmd_resp_eq_create {  	u16 rsvd0;		/* sword */  } __packed; +struct be_set_eqd { +	u32 eq_id; +	u32 phase; +	u32 delay_multiplier; +} __packed; +  struct mgmt_chap_format {  	u32 flags;  	u8  intr_chap_name[256]; @@ -615,7 +629,7 @@ struct be_cmd_req_modify_eq_delay {  		u32 eq_id;  		u32 phase;  		u32 delay_multiplier; -	} delay[8]; +	} delay[MAX_CPUS];  } __packed;  /******************** Get MAC ADDR *******************/ @@ -701,8 +715,11 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);  void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag); +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, +			    int num);  int beiscsi_mccq_compl(struct beiscsi_hba *phba, -			uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); +			uint32_t tag, struct be_mcc_wrb **wrb, +			struct be_dma_mem *mbx_cmd_mem);  /*ISCSI Functuions */  int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);  int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); @@ -722,7 +739,13 @@ int be_mbox_notify(struct be_ctrl_info *ctrl);  int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,  				    struct be_queue_info *cq,  				    struct be_queue_info *dq, int length, -				    int entry_size); +				    int entry_size, uint8_t is_header, +				    uint8_t ulp_num); + +int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl, +				    struct be_dma_mem *q_mem); + +int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl);  int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,  				struct be_dma_mem *q_mem, u32 page_offset, @@ -731,7 +754,9 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,  int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);  int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, -		       struct be_queue_info *wrbq); +		       struct be_queue_info *wrbq, +		       struct hwi_wrb_context *pwrb_context, +		       uint8_t ulp_num);  bool is_link_state_evt(u32 trailer); @@ -776,7 +801,9 @@ struct be_defq_create_req {  	struct be_cmd_req_hdr hdr;  	u16 num_pages;  	u8 ulp_num; -	u8 rsvd0; +#define BEISCSI_DUAL_ULP_AWARE_BIT	0	/* Byte 3 - Bit 0 */ +#define BEISCSI_BIND_Q_TO_ULP_BIT	1	/* Byte 3 - Bit 1 */ +	u8 dua_feature;  	struct be_default_pdu_context context;  	struct phys_addr pages[8];  } __packed; @@ -784,6 +811,27 @@ struct be_defq_create_req {  struct be_defq_create_resp {  	struct be_cmd_req_hdr hdr;  	u16 id; +	u8 rsvd0; +	u8 ulp_num; +	u32 doorbell_offset; +	u16 register_set; +	u16 doorbell_format; +} __packed; + +struct be_post_template_pages_req { +	struct 
be_cmd_req_hdr hdr; +	u16 num_pages; +#define BEISCSI_TEMPLATE_HDR_TYPE_ISCSI	0x1 +	u16 type; +	struct phys_addr scratch_pa; +	struct virt_addr scratch_va; +	struct virt_addr pages_va; +	struct phys_addr pages[16]; +} __packed; + +struct be_remove_template_pages_req { +	struct be_cmd_req_hdr hdr; +	u16 type;  	u16 rsvd0;  } __packed; @@ -800,14 +848,18 @@ struct be_wrbq_create_req {  	struct be_cmd_req_hdr hdr;  	u16 num_pages;  	u8 ulp_num; -	u8 rsvd0; +	u8 dua_feature;  	struct phys_addr pages[8];  } __packed;  struct be_wrbq_create_resp {  	struct be_cmd_resp_hdr resp_hdr;  	u16 cid; -	u16 rsvd0; +	u8 rsvd0; +	u8 ulp_num; +	u32 doorbell_offset; +	u16 register_set; +	u16 doorbell_format;  } __packed;  #define SOL_CID_MASK		0x0000FFC0 @@ -962,6 +1014,26 @@ struct tcp_connect_and_offload_in {  	u8 rsvd0[3];  } __packed; +struct tcp_connect_and_offload_in_v1 { +	struct be_cmd_req_hdr hdr; +	struct ip_addr_format ip_address; +	u16 tcp_port; +	u16 cid; +	u16 cq_id; +	u16 defq_id; +	struct phys_addr dataout_template_pa; +	u16 hdr_ring_id; +	u16 data_ring_id; +	u8 do_offload; +	u8 ifd_state; +	u8 rsvd0[2]; +	u16 tcp_window_size; +	u8 tcp_window_scale_count; +	u8 rsvd1; +	u32 tcp_mss:24; +	u8 rsvd2; +} __packed; +  struct tcp_connect_and_offload_out {  	struct be_cmd_resp_hdr hdr;  	u32 connection_handle; @@ -975,8 +1047,8 @@ struct be_mcc_wrb_context {  	int *users_final_status;  } __packed; -#define DB_DEF_PDU_RING_ID_MASK		0x3FF	/* bits 0 - 9 */ -#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 0 - 9 */ +#define DB_DEF_PDU_RING_ID_MASK	0x3FFF	/* bits 0 - 13 */ +#define DB_DEF_PDU_CQPROC_MASK		0x3FFF	/* bits 16 - 29 */  #define DB_DEF_PDU_REARM_SHIFT		14  #define DB_DEF_PDU_EVENT_SHIFT		15  #define DB_DEF_PDU_CQPROC_SHIFT		16 @@ -1002,6 +1074,7 @@ union tcp_upload_params {  } __packed;  struct be_ulp_fw_cfg { +#define BEISCSI_ULP_ISCSI_INI_MODE	0x10  	u32 ulp_mode;  	u32 etx_base;  	u32 etx_count; @@ -1017,14 +1090,26 @@ struct be_ulp_fw_cfg {  	u32 icd_count;  }; +struct be_ulp_chain_icd { +	u32 chain_base; +	u32 chain_count; +}; +  struct be_fw_cfg {  	struct be_cmd_req_hdr hdr;  	u32 be_config_number;  	u32 asic_revision;  	u32 phys_port; +#define BEISCSI_FUNC_ISCSI_INI_MODE	0x10 +#define BEISCSI_FUNC_DUA_MODE	0x800  	u32 function_mode;  	struct be_ulp_fw_cfg ulp[2];  	u32 function_caps; +	u32 cqid_base; +	u32 cqid_count; +	u32 eqid_base; +	u32 eqid_count; +	struct be_ulp_chain_icd chain_icd[2];  } __packed;  struct be_cmd_get_all_if_id_req { @@ -1262,4 +1347,5 @@ void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,  void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,  			u8 subsystem, u8 opcode, int cmd_len); +void be2iscsi_fail_session(struct iscsi_cls_session *cls_session);  #endif /* !BEISCSI_CMDS_H */ diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index ef36be003f6..fd284ff36ec 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -58,10 +58,15 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,  	}  	beiscsi_ep = ep->dd_data;  	phba = beiscsi_ep->phba; -	shost = phba->shost; -	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, -		    "BS_%d : In beiscsi_session_create\n"); +	if (phba->state & BE_ADAPTER_PCI_ERR) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, +			    "BS_%d : PCI_ERROR Recovery\n"); +		return NULL; +	} else { +		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, +			    "BS_%d : In beiscsi_session_create\n"); +	}  	if (cmds_max > 
beiscsi_ep->phba->params.wrbs_per_cxn) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, @@ -74,6 +79,7 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,  		cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;  	} +	shost = phba->shost;  	cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,  					  shost, cmds_max,  					  sizeof(*beiscsi_sess), @@ -194,6 +200,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,  	struct beiscsi_conn *beiscsi_conn = conn->dd_data;  	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);  	struct beiscsi_hba *phba = iscsi_host_priv(shost); +	struct hwi_controller *phwi_ctrlr = phba->phwi_ctrlr; +	struct hwi_wrb_context *pwrb_context;  	struct beiscsi_endpoint *beiscsi_ep;  	struct iscsi_endpoint *ep; @@ -214,9 +222,13 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,  		return -EEXIST;  	} +	pwrb_context = &phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID( +						beiscsi_ep->ep_cid)]; +  	beiscsi_conn->beiscsi_conn_cid = beiscsi_ep->ep_cid;  	beiscsi_conn->ep = beiscsi_ep;  	beiscsi_ep->conn = beiscsi_conn; +	beiscsi_conn->doorbell_offset = pwrb_context->doorbell_offset;  	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,  		    "BS_%d : beiscsi_conn=%p conn=%p ep_cid=%d\n", @@ -265,13 +277,17 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)  void beiscsi_create_def_ifaces(struct beiscsi_hba *phba)  { -	struct be_cmd_get_if_info_resp if_info; +	struct be_cmd_get_if_info_resp *if_info; -	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) +	if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) {  		beiscsi_create_ipv4_iface(phba); +		kfree(if_info); +	} -	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) +	if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) {  		beiscsi_create_ipv6_iface(phba); +		kfree(if_info); +	}  }  void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba) @@ -467,6 +483,12 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,  	uint32_t rm_len = dt_len;  	int ret = 0 ; +	if (phba->state & BE_ADAPTER_PCI_ERR) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, +			    "BS_%d : In PCI_ERROR Recovery\n"); +		return -EBUSY; +	} +  	nla_for_each_attr(attrib, data, dt_len, rm_len) {  		iface_param = nla_data(attrib); @@ -512,11 +534,9 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,  		struct iscsi_iface *iface, int param,  		char *buf)  { -	struct be_cmd_get_if_info_resp if_info; +	struct be_cmd_get_if_info_resp *if_info;  	int len, ip_type = BE2_IPV4; -	memset(&if_info, 0, sizeof(if_info)); -  	if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)  		ip_type = BE2_IPV6; @@ -526,45 +546,46 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,  	switch (param) {  	case ISCSI_NET_PARAM_IPV4_ADDR: -		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.addr); +		len = sprintf(buf, "%pI4\n", if_info->ip_addr.addr);  		break;  	case ISCSI_NET_PARAM_IPV6_ADDR: -		len = sprintf(buf, "%pI6\n", &if_info.ip_addr.addr); +		len = sprintf(buf, "%pI6\n", if_info->ip_addr.addr);  		break;  	case ISCSI_NET_PARAM_IPV4_BOOTPROTO: -		if (!if_info.dhcp_state) +		if (!if_info->dhcp_state)  			len = sprintf(buf, "static\n");  		else  			len = sprintf(buf, "dhcp\n");  		break;  	case ISCSI_NET_PARAM_IPV4_SUBNET: -		len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask); +		len = sprintf(buf, "%pI4\n", if_info->ip_addr.subnet_mask);  		break;  	case ISCSI_NET_PARAM_VLAN_ENABLED:  		len = sprintf(buf, "%s\n", -			     (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) +			     
(if_info->vlan_priority == BEISCSI_VLAN_DISABLE)  			     ? "Disabled\n" : "Enabled\n");  		break;  	case ISCSI_NET_PARAM_VLAN_ID: -		if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) -			return -EINVAL; +		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) +			len = -EINVAL;  		else  			len = sprintf(buf, "%d\n", -				     (if_info.vlan_priority & +				     (if_info->vlan_priority &  				     ISCSI_MAX_VLAN_ID));  		break;  	case ISCSI_NET_PARAM_VLAN_PRIORITY: -		if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE) -			return -EINVAL; +		if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) +			len = -EINVAL;  		else  			len = sprintf(buf, "%d\n", -				     ((if_info.vlan_priority >> 13) & +				     ((if_info->vlan_priority >> 13) &  				     ISCSI_MAX_VLAN_PRIORITY));  		break;  	default:  		WARN_ON(1);  	} +	kfree(if_info);  	return len;  } @@ -577,6 +598,12 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,  	struct be_cmd_get_def_gateway_resp gateway;  	int len = -ENOSYS; +	if (phba->state & BE_ADAPTER_PCI_ERR) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, +			    "BS_%d : In PCI_ERROR Recovery\n"); +		return -EBUSY; +	} +  	switch (param) {  	case ISCSI_NET_PARAM_IPV4_ADDR:  	case ISCSI_NET_PARAM_IPV4_SUBNET: @@ -672,8 +699,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,  			session->max_burst = 262144;  		break;  	case ISCSI_PARAM_MAX_XMIT_DLENGTH: -		if ((conn->max_xmit_dlength > 65536) || -		    (conn->max_xmit_dlength == 0)) +		if (conn->max_xmit_dlength > 65536)  			conn->max_xmit_dlength = 65536;  	default:  		return 0; @@ -727,7 +753,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)  	struct beiscsi_hba *phba = iscsi_host_priv(shost);  	struct iscsi_cls_host *ihost = shost->shost_data; -	ihost->port_state = (phba->state == BE_ADAPTER_UP) ? +	ihost->port_state = (phba->state == BE_ADAPTER_LINK_UP) ?  		
ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;  } @@ -767,7 +793,7 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)  		ihost->port_speed = ISCSI_PORT_SPEED_10MBPS;  		break;  	case BE2ISCSI_LINK_SPEED_100MBPS: -		ihost->port_speed = BE2ISCSI_LINK_SPEED_100MBPS; +		ihost->port_speed = ISCSI_PORT_SPEED_100MBPS;  		break;  	case BE2ISCSI_LINK_SPEED_1GBPS:  		ihost->port_speed = ISCSI_PORT_SPEED_1GBPS; @@ -795,9 +821,16 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,  	struct beiscsi_hba *phba = iscsi_host_priv(shost);  	int status = 0; -	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, -		    "BS_%d : In beiscsi_get_host_param," -		    " param= %d\n", param); + +	if (phba->state & BE_ADAPTER_PCI_ERR) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, +			    "BS_%d : In PCI_ERROR Recovery\n"); +		return -EBUSY; +	} else { +		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, +			    "BS_%d : In beiscsi_get_host_param," +			    " param = %d\n", param); +	}  	switch (param) {  	case ISCSI_HOST_PARAM_HWADDRESS: @@ -840,7 +873,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)  	struct be_cmd_get_nic_conf_resp resp;  	int rc; -	if (strlen(phba->mac_address)) +	if (phba->mac_addr_set)  		return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);  	memset(&resp, 0, sizeof(resp)); @@ -848,6 +881,7 @@ int beiscsi_get_macaddr(char *buf, struct beiscsi_hba *phba)  	if (rc)  		return rc; +	phba->mac_addr_set = true;  	memcpy(phba->mac_address, resp.mac_address, ETH_ALEN);  	return sysfs_format_mac(buf, phba->mac_address, ETH_ALEN);  } @@ -923,6 +957,10 @@ static void  beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,  		      session->max_r2t);  	AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,  		      (conn->exp_statsn - 1)); +	AMAP_SET_BITS(struct amap_beiscsi_offload_params, +		      max_recv_data_segment_length, params, +		      conn->max_recv_dlength); +  }  /** @@ -935,10 +973,19 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)  	struct beiscsi_conn *beiscsi_conn = conn->dd_data;  	struct beiscsi_endpoint *beiscsi_ep;  	struct beiscsi_offload_params params; +	struct beiscsi_hba *phba; -	beiscsi_log(beiscsi_conn->phba, KERN_INFO, -		    BEISCSI_LOG_CONFIG, -		    "BS_%d : In beiscsi_conn_start\n"); +	phba = ((struct beiscsi_conn *)conn->dd_data)->phba; + +	if (phba->state & BE_ADAPTER_PCI_ERR) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, +			    "BS_%d : In PCI_ERROR Recovery\n"); +		return -EBUSY; +	} else { +		beiscsi_log(beiscsi_conn->phba, KERN_INFO, +			    BEISCSI_LOG_CONFIG, +			    "BS_%d : In beiscsi_conn_start\n"); +	}  	memset(¶ms, 0, sizeof(struct beiscsi_offload_params));  	beiscsi_ep = beiscsi_conn->ep; @@ -960,15 +1007,31 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)   */  static int beiscsi_get_cid(struct beiscsi_hba *phba)  { -	unsigned short cid = 0xFFFF; - -	if (!phba->avlbl_cids) -		return cid; - -	cid = phba->cid_array[phba->cid_alloc++]; -	if (phba->cid_alloc == phba->params.cxns_per_ctrl) -		phba->cid_alloc = 0; -	phba->avlbl_cids--; +	unsigned short cid = 0xFFFF, cid_from_ulp; +	struct ulp_cid_info *cid_info = NULL; +	uint16_t cid_avlbl_ulp0, cid_avlbl_ulp1; + +	/* Find the ULP which has more CID available */ +	cid_avlbl_ulp0 = (phba->cid_array_info[BEISCSI_ULP0]) ? +			  BEISCSI_ULP0_AVLBL_CID(phba) : 0; +	cid_avlbl_ulp1 = (phba->cid_array_info[BEISCSI_ULP1]) ? +			  BEISCSI_ULP1_AVLBL_CID(phba) : 0; +	cid_from_ulp = (cid_avlbl_ulp0 > cid_avlbl_ulp1) ? 
+			BEISCSI_ULP0 : BEISCSI_ULP1; + +	if (test_bit(cid_from_ulp, (void *)&phba->fw_config.ulp_supported)) { +		cid_info = phba->cid_array_info[cid_from_ulp]; +		if (!cid_info->avlbl_cids) +			return cid; + +		cid = cid_info->cid_array[cid_info->cid_alloc++]; + +		if (cid_info->cid_alloc == BEISCSI_GET_CID_COUNT( +					   phba, cid_from_ulp)) +			cid_info->cid_alloc = 0; + +		cid_info->avlbl_cids--; +	}  	return cid;  } @@ -979,10 +1042,22 @@ static int beiscsi_get_cid(struct beiscsi_hba *phba)   */  static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid)  { -	phba->avlbl_cids++; -	phba->cid_array[phba->cid_free++] = cid; -	if (phba->cid_free == phba->params.cxns_per_ctrl) -		phba->cid_free = 0; +	uint16_t cid_post_ulp; +	struct hwi_controller *phwi_ctrlr; +	struct hwi_wrb_context *pwrb_context; +	struct ulp_cid_info *cid_info = NULL; +	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + +	phwi_ctrlr = phba->phwi_ctrlr; +	pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; +	cid_post_ulp = pwrb_context->ulp_num; + +	cid_info = phba->cid_array_info[cid_post_ulp]; +	cid_info->avlbl_cids++; + +	cid_info->cid_array[cid_info->cid_free++] = cid; +	if (cid_info->cid_free == BEISCSI_GET_CID_COUNT(phba, cid_post_ulp)) +		cid_info->cid_free = 0;  }  /** @@ -1031,7 +1106,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,  	struct beiscsi_hba *phba = beiscsi_ep->phba;  	struct tcp_connect_and_offload_out *ptcpcnct_out;  	struct be_dma_mem nonemb_cmd; -	unsigned int tag; +	unsigned int tag, req_memsize;  	int ret = -ENOMEM;  	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, @@ -1052,8 +1127,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,  		       (beiscsi_ep->ep_cid)] = ep;  	beiscsi_ep->cid_vld = 0; + +	if (is_chip_be2_be3r(phba)) +		req_memsize = sizeof(struct tcp_connect_and_offload_in); +	else +		req_memsize = sizeof(struct tcp_connect_and_offload_in_v1); +  	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, -				sizeof(struct tcp_connect_and_offload_in), +				req_memsize,  				&nonemb_cmd.dma);  	if (nonemb_cmd.va == NULL) { @@ -1064,7 +1145,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,  		beiscsi_free_ep(beiscsi_ep);  		return -ENOMEM;  	} -	nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); +	nonemb_cmd.size = req_memsize;  	memset(nonemb_cmd.va, 0, nonemb_cmd.size);  	tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);  	if (tag <= 0) { @@ -1078,16 +1159,18 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,  		return -EAGAIN;  	} -	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); +	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);  	if (ret) {  		beiscsi_log(phba, KERN_ERR,  			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,  			    "BS_%d : mgmt_open_connection Failed"); -		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, -			    nonemb_cmd.va, nonemb_cmd.dma); +		if (ret != -EBUSY) +			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, +					    nonemb_cmd.va, nonemb_cmd.dma); +  		beiscsi_free_ep(beiscsi_ep); -		return -EBUSY; +		return ret;  	}  	ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; @@ -1135,7 +1218,12 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,  		return ERR_PTR(ret);  	} -	if (phba->state != BE_ADAPTER_UP) { +	if (phba->state & BE_ADAPTER_PCI_ERR) { +		ret = -EBUSY; +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, +			    "BS_%d : In PCI_ERROR Recovery\n"); +		return ERR_PTR(ret); +	} else if (phba->state & 
BE_ADAPTER_LINK_DOWN) {  		ret = -EBUSY;  		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,  			    "BS_%d : The Adapter Port state is Down!!!\n"); @@ -1260,6 +1348,12 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)  		tcp_upload_flag = CONNECTION_UPLOAD_ABORT;  	} +	if (phba->state & BE_ADAPTER_PCI_ERR) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, +			    "BS_%d : PCI_ERROR Recovery\n"); +		goto free_ep; +	} +  	tag = mgmt_invalidate_connection(phba, beiscsi_ep,  					  beiscsi_ep->ep_cid,  					  mgmt_invalidate_flag, @@ -1272,6 +1366,8 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)  	beiscsi_mccq_compl(phba, tag, NULL, NULL);  	beiscsi_close_conn(beiscsi_ep, tcp_upload_flag); +free_ep: +	msleep(BEISCSI_LOGOUT_SYNC_DELAY);  	beiscsi_free_ep(beiscsi_ep);  	beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);  	iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index a1f5ac7a980..56467df3d6d 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -149,18 +149,25 @@ BEISCSI_RW_ATTR(log_enable, 0x00,  		"\t\t\t\tMiscellaneous Events	: 0x04\n"  		"\t\t\t\tError Handling		: 0x08\n"  		"\t\t\t\tIO Path Events		: 0x10\n" -		"\t\t\t\tConfiguration Path	: 0x20\n"); +		"\t\t\t\tConfiguration Path	: 0x20\n" +		"\t\t\t\tiSCSI Protocol		: 0x40\n");  DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);  DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);  DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); -DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); +DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); +DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, +	     beiscsi_active_session_disp, NULL); +DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, +	     beiscsi_free_session_disp, NULL);  struct device_attribute *beiscsi_attrs[] = {  	&dev_attr_beiscsi_log_enable,  	&dev_attr_beiscsi_drvr_ver,  	&dev_attr_beiscsi_adapter_family,  	&dev_attr_beiscsi_fw_ver, -	&dev_attr_beiscsi_active_cid_count, +	&dev_attr_beiscsi_active_session_count, +	&dev_attr_beiscsi_free_session_count, +	&dev_attr_beiscsi_phys_port,  	NULL,  }; @@ -221,24 +228,30 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)  	struct invalidate_command_table *inv_tbl;  	struct be_dma_mem nonemb_cmd;  	unsigned int cid, tag, num_invalidate; +	int rc;  	cls_session = starget_to_session(scsi_target(sc->device));  	session = cls_session->dd_data; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (!aborted_task || !aborted_task->sc) {  		/* we raced */ -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		return SUCCESS;  	}  	aborted_io_task = aborted_task->dd_data;  	if (!aborted_io_task->scsi_cmnd) {  		/* raced or invalid command */ -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		return SUCCESS;  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock); +	/* Invalidate WRB Posted for this Task */ +	AMAP_SET_BITS(struct amap_iscsi_wrb, invld, +		      aborted_io_task->pwrb_handle->pwrb, +		      1); +  	conn = aborted_task->conn;  	beiscsi_conn = conn->dd_data;  	phba = beiscsi_conn->phba; @@ -273,9 +286,11 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)  		return FAILED;  	} -	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); -	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, -			    
nonemb_cmd.va, nonemb_cmd.dma); +	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); +	if (rc != -EBUSY) +		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, +				    nonemb_cmd.va, nonemb_cmd.dma); +  	return iscsi_eh_abort(sc);  } @@ -291,13 +306,14 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)  	struct invalidate_command_table *inv_tbl;  	struct be_dma_mem nonemb_cmd;  	unsigned int cid, tag, i, num_invalidate; +	int rc;  	/* invalidate iocbs */  	cls_session = starget_to_session(scsi_target(sc->device));  	session = cls_session->dd_data; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		return FAILED;  	}  	conn = session->leadconn; @@ -313,15 +329,20 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)  		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)  			continue; -		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) +		if (sc->device->lun != abrt_task->sc->device->lun)  			continue; +		/* Invalidate WRB Posted for this Task */ +		AMAP_SET_BITS(struct amap_iscsi_wrb, invld, +			      abrt_io_task->pwrb_handle->pwrb, +			      1); +  		inv_tbl->cid = cid;  		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;  		num_invalidate++;  		inv_tbl++;  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	inv_tbl = phba->inv_tbl;  	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, @@ -346,9 +367,10 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)  		return FAILED;  	} -	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); -	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, -			    nonemb_cmd.va, nonemb_cmd.dma); +	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); +	if (rc != -EBUSY) +		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, +				    nonemb_cmd.va, nonemb_cmd.dma);  	return iscsi_eh_device_reset(sc);  } @@ -577,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)  	pci_set_drvdata(pcidev, phba);  	phba->interface_handle = 0xFFFFFFFF; -	if (iscsi_host_add(shost, &phba->pcidev->dev)) -		goto free_devices; -  	return phba; - -free_devices: -	pci_dev_put(phba->pcidev); -	iscsi_host_free(phba->shost); -	return NULL;  }  static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) @@ -657,8 +671,19 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)  	}  	pci_set_master(pcidev); -	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { -		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); +	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); +	if (ret) { +		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); +		if (ret) { +			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); +			pci_disable_device(pcidev); +			return ret; +		} else { +			ret = pci_set_consistent_dma_mask(pcidev, +							  DMA_BIT_MASK(32)); +		} +	} else { +		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));  		if (ret) {  			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");  			pci_disable_device(pcidev); @@ -699,30 +724,85 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)  	return status;  } +/** + * beiscsi_get_params()- Set the config paramters + * @phba: ptr  device priv structure + **/  static void beiscsi_get_params(struct beiscsi_hba *phba)  { -	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count -				    - (phba->fw_config.iscsi_cid_count -				 
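The beiscsi_enable_pci() hunk above replaces a consistent-mask-only probe with the usual two-step negotiation: try a 64-bit streaming DMA mask, fall back to 32-bit, and keep the coherent mask in agreement with whichever width stuck. The same logic restated outside the driver (legacy pci_set_*_dma_mask API, as in the patch; needs linux/pci.h and linux/dma-mapping.h):

	static int enable_pci_dma(struct pci_dev *pcidev)
	{
		int ret;

		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			/* 64-bit not addressable: drop to 32-bit or fail. */
			ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
			if (ret)
				return ret;
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(64));
		}
		return ret;
	}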
   + BE2_TMFS -				    + BE2_NOPOUT_REQ)); -	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; -	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; -	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; +	uint32_t total_cid_count = 0; +	uint32_t total_icd_count = 0; +	uint8_t ulp_num = 0; + +	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + +			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); + +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		uint32_t align_mask = 0; +		uint32_t icd_post_per_page = 0; +		uint32_t icd_count_unavailable = 0; +		uint32_t icd_start = 0, icd_count = 0; +		uint32_t icd_start_align = 0, icd_count_align = 0; + +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { +			icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; +			icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; + +			/* Get ICD count that can be posted on each page */ +			icd_post_per_page = (PAGE_SIZE / (BE2_SGE * +					     sizeof(struct iscsi_sge))); +			align_mask = (icd_post_per_page - 1); + +			/* Check if icd_start is aligned ICD per page posting */ +			if (icd_start % icd_post_per_page) { +				icd_start_align = ((icd_start + +						    icd_post_per_page) & +						    ~(align_mask)); +				phba->fw_config. +					iscsi_icd_start[ulp_num] = +					icd_start_align; +			} + +			icd_count_align = (icd_count & ~align_mask); + +			/* ICD discarded in the process of alignment */ +			if (icd_start_align) +				icd_count_unavailable = ((icd_start_align - +							  icd_start) + +							 (icd_count - +							  icd_count_align)); + +			/* Updated ICD count available */ +			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - +					icd_count_unavailable); + +			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +					"BM_%d : Aligned ICD values\n" +					"\t ICD Start : %d\n" +					"\t ICD Count : %d\n" +					"\t ICD Discarded : %d\n", +					phba->fw_config. +					iscsi_icd_start[ulp_num], +					phba->fw_config. +					iscsi_icd_count[ulp_num], +					icd_count_unavailable); +			break; +		} +	} + +	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; +	phba->params.ios_per_ctrl = (total_icd_count - +				    (total_cid_count + +				     BE2_TMFS + BE2_NOPOUT_REQ)); +	phba->params.cxns_per_ctrl = total_cid_count; +	phba->params.asyncpdus_per_ctrl = total_cid_count; +	phba->params.icds_per_ctrl = total_icd_count;  	phba->params.num_sge_per_io = BE2_SGE;  	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;  	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;  	phba->params.eq_timer = 64; -	phba->params.num_eq_entries = -	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 -				    + BE2_TMFS) / 512) + 1) * 512; -	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) -				? 
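The alignment block above keeps SGL posting page-granular: icd_post_per_page = PAGE_SIZE / (BE2_SGE * sizeof(struct iscsi_sge)) entries fit on a page, icd_start is rounded up to that multiple, and the ICDs lost off both ends are subtracted from the usable count. Worked standalone with illustrative sizes (32 entries per page, mirroring the patch's arithmetic):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t per_page = 32;		/* ICDs postable per page */
		uint32_t mask = per_page - 1;
		uint32_t start = 70, count = 1000; /* unaligned fw window */
		uint32_t count_align = count & ~mask;	/* 992 */
		uint32_t lost = 0;

		if (start % per_page) {
			uint32_t start_align = (start + per_page) & ~mask;

			/* 26 leading + 8 trailing ICDs discarded */
			lost = (start_align - start) + (count - count_align);
			start = start_align;		/* 96 */
		}
		count -= lost;		/* 1000 - 34 = 966 usable ICDs */

		printf("start %u count %u discarded %u\n", start, count, lost);
		return 0;
	}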
1024 : phba->params.num_eq_entries; -	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -		    "BM_%d : phba->params.num_eq_entries=%d\n", -		    phba->params.num_eq_entries); -	phba->params.num_cq_entries = -	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2 -				    + BE2_TMFS) / 512) + 1) * 512; +	phba->params.num_eq_entries = 1024; +	phba->params.num_cq_entries = 1024;  	phba->params.wrbs_per_cxn = 256;  } @@ -732,14 +812,23 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,  			   unsigned char rearm, unsigned char event)  {  	u32 val = 0; -	val |= id & DB_EQ_RING_ID_MASK; +  	if (rearm)  		val |= 1 << DB_EQ_REARM_SHIFT;  	if (clr_interrupt)  		val |= 1 << DB_EQ_CLR_SHIFT;  	if (event)  		val |= 1 << DB_EQ_EVNT_SHIFT; +  	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; +	/* Setting lower order EQ_ID Bits */ +	val |= (id & DB_EQ_RING_ID_LOW_MASK); + +	/* Setting Higher order EQ_ID Bits */ +	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & +		  DB_EQ_RING_ID_HIGH_MASK) +		  << DB_EQ_HIGH_SET_SHIFT); +  	iowrite32(val, phba->db_va + DB_EQ_OFFSET);  } @@ -801,7 +890,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)  	struct be_queue_info *cq;  	unsigned int num_eq_processed;  	struct be_eq_obj *pbe_eq; -	unsigned long flags;  	pbe_eq = dev_id;  	eq = &pbe_eq->q; @@ -810,31 +898,15 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)  	phba = pbe_eq->phba;  	num_eq_processed = 0; -	if (blk_iopoll_enabled) { -		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] -					& EQE_VALID_MASK) { -			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) -				blk_iopoll_sched(&pbe_eq->iopoll); - -			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); -			queue_tail_inc(eq); -			eqe = queue_tail_node(eq); -			num_eq_processed++; -		} -	} else { -		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] -						& EQE_VALID_MASK) { -			spin_lock_irqsave(&phba->isr_lock, flags); -			pbe_eq->todo_cq = true; -			spin_unlock_irqrestore(&phba->isr_lock, flags); -			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); -			queue_tail_inc(eq); -			eqe = queue_tail_node(eq); -			num_eq_processed++; -		} +	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] +				& EQE_VALID_MASK) { +		if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) +			blk_iopoll_sched(&pbe_eq->iopoll); -		if (pbe_eq->todo_cq) -			queue_work(phba->wq, &pbe_eq->work_cqs); +		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); +		queue_tail_inc(eq); +		eqe = queue_tail_node(eq); +		num_eq_processed++;  	}  	if (num_eq_processed) @@ -855,7 +927,6 @@ static irqreturn_t be_isr(int irq, void *dev_id)  	struct hwi_context_memory *phwi_context;  	struct be_eq_entry *eqe = NULL;  	struct be_queue_info *eq; -	struct be_queue_info *cq;  	struct be_queue_info *mcc;  	unsigned long flags, index;  	unsigned int num_mcceq_processed, num_ioeq_processed; @@ -881,72 +952,40 @@ static irqreturn_t be_isr(int irq, void *dev_id)  	num_ioeq_processed = 0;  	num_mcceq_processed = 0; -	if (blk_iopoll_enabled) { -		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] -					& EQE_VALID_MASK) { -			if (((eqe->dw[offsetof(struct amap_eq_entry, -			     resource_id) / 32] & -			     EQE_RESID_MASK) >> 16) == mcc->id) { -				spin_lock_irqsave(&phba->isr_lock, flags); -				pbe_eq->todo_mcc_cq = true; -				spin_unlock_irqrestore(&phba->isr_lock, flags); -				num_mcceq_processed++; -			} else { -				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) -					blk_iopoll_sched(&pbe_eq->iopoll); -				num_ioeq_processed++; -			} -			AMAP_SET_BITS(struct 
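hwi_ring_eq_db() above now splits the queue id across two doorbell fields because newer chips have more EQs than the classic ring-id field can number: the low bits land in the old field, the remainder in a separate high field. The composition, with illustrative field widths (the real shifts and masks live in the driver headers, e.g. DB_EQ_RING_ID_LOW_MASK):

	#include <stdint.h>

	/* Illustrative layout: 9 low id bits, 5 high id bits elsewhere. */
	#define RING_ID_LOW_MASK	0x1FF
	#define RING_ID_HIGH_SHIFT	9	/* id bits above low field */
	#define RING_ID_HIGH_MASK	0x1F
	#define RING_ID_HIGH_SET	11	/* where high bits land */
	#define NUM_POPPED_SHIFT	16
	#define REARM_BIT		(1u << 29)

	static uint32_t eq_doorbell(uint16_t id, uint16_t popped, int rearm)
	{
		uint32_t val = 0;

		if (rearm)
			val |= REARM_BIT;
		val |= (uint32_t)popped << NUM_POPPED_SHIFT;
		val |= id & RING_ID_LOW_MASK;		/* low id bits */
		val |= ((id >> RING_ID_HIGH_SHIFT) & RING_ID_HIGH_MASK)
			<< RING_ID_HIGH_SET;		/* high id bits */
		return val;
	}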
amap_eq_entry, valid, eqe, 0); -			queue_tail_inc(eq); -			eqe = queue_tail_node(eq); -		} -		if (num_ioeq_processed || num_mcceq_processed) { -			if (pbe_eq->todo_mcc_cq) -				queue_work(phba->wq, &pbe_eq->work_cqs); - -			if ((num_mcceq_processed) && (!num_ioeq_processed)) -				hwi_ring_eq_db(phba, eq->id, 0, -					      (num_ioeq_processed + -					       num_mcceq_processed) , 1, 1); -			else -				hwi_ring_eq_db(phba, eq->id, 0, -					       (num_ioeq_processed + -						num_mcceq_processed), 0, 1); - -			return IRQ_HANDLED; -		} else -			return IRQ_NONE; -	} else { -		cq = &phwi_context->be_cq[0]; -		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] -						& EQE_VALID_MASK) { - -			if (((eqe->dw[offsetof(struct amap_eq_entry, -			     resource_id) / 32] & -			     EQE_RESID_MASK) >> 16) != cq->id) { -				spin_lock_irqsave(&phba->isr_lock, flags); -				pbe_eq->todo_mcc_cq = true; -				spin_unlock_irqrestore(&phba->isr_lock, flags); -			} else { -				spin_lock_irqsave(&phba->isr_lock, flags); -				pbe_eq->todo_cq = true; -				spin_unlock_irqrestore(&phba->isr_lock, flags); -			} -			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); -			queue_tail_inc(eq); -			eqe = queue_tail_node(eq); +	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] +				& EQE_VALID_MASK) { +		if (((eqe->dw[offsetof(struct amap_eq_entry, +		     resource_id) / 32] & +		     EQE_RESID_MASK) >> 16) == mcc->id) { +			spin_lock_irqsave(&phba->isr_lock, flags); +			pbe_eq->todo_mcc_cq = true; +			spin_unlock_irqrestore(&phba->isr_lock, flags); +			num_mcceq_processed++; +		} else { +			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) +				blk_iopoll_sched(&pbe_eq->iopoll);  			num_ioeq_processed++;  		} -		if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq) +		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); +		queue_tail_inc(eq); +		eqe = queue_tail_node(eq); +	} +	if (num_ioeq_processed || num_mcceq_processed) { +		if (pbe_eq->todo_mcc_cq)  			queue_work(phba->wq, &pbe_eq->work_cqs); -		if (num_ioeq_processed) { +		if ((num_mcceq_processed) && (!num_ioeq_processed))  			hwi_ring_eq_db(phba, eq->id, 0, -				       num_ioeq_processed, 1, 1); -			return IRQ_HANDLED; -		} else -			return IRQ_NONE; -	} +				      (num_ioeq_processed + +				       num_mcceq_processed) , 1, 1); +		else +			hwi_ring_eq_db(phba, eq->id, 0, +				       (num_ioeq_processed + +					num_mcceq_processed), 0, 1); + +		return IRQ_HANDLED; +	} else +		return IRQ_NONE;  }  static int beiscsi_init_irqs(struct beiscsi_hba *phba) @@ -1021,15 +1060,25 @@ free_msix_irqs:  	return ret;  } -static void hwi_ring_cq_db(struct beiscsi_hba *phba, +void hwi_ring_cq_db(struct beiscsi_hba *phba,  			   unsigned int id, unsigned int num_processed,  			   unsigned char rearm, unsigned char event)  {  	u32 val = 0; -	val |= id & DB_CQ_RING_ID_MASK; +  	if (rearm)  		val |= 1 << DB_CQ_REARM_SHIFT; +  	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; + +	/* Setting lower order CQ_ID Bits */ +	val |= (id & DB_CQ_RING_ID_LOW_MASK); + +	/* Setting Higher order CQ_ID Bits */ +	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & +		  DB_CQ_RING_ID_HIGH_MASK) +		  << DB_CQ_HIGH_SET_SHIFT); +  	iowrite32(val, phba->db_va + DB_CQ_OFFSET);  } @@ -1078,9 +1127,9 @@ beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,  		return 1;  	} -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->back_lock);  	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len); -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->back_lock);  	return 0;  } @@ -1270,8 
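With blk_iopoll now unconditional, both ISRs above collapse to one event-queue drain: walk entries while the valid bit is set, schedule polling (or mark MCC work), clear the valid bit, advance the tail, then ring the doorbell with the count consumed. Skeleton of that drain, with the driver's AMAP accessors abstracted behind stand-in helpers:

	#include <stdbool.h>

	struct eq;				/* opaque event queue */

	/* Stand-ins for AMAP bit ops, queue_tail_inc(), hwi_ring_eq_db(). */
	extern bool eqe_valid(struct eq *eq);	 /* tail entry valid bit */
	extern void eqe_consume(struct eq *eq);	 /* clear valid, tail++ */
	extern void schedule_iopoll(struct eq *eq);
	extern void ring_eq_db(struct eq *eq, unsigned int popped, int rearm);

	static int eq_drain(struct eq *eq)
	{
		unsigned int done = 0;

		while (eqe_valid(eq)) {
			schedule_iopoll(eq);	/* CQ work runs in softirq */
			eqe_consume(eq);
			done++;
		}
		if (!done)
			return 0;		/* IRQ_NONE: not our event */
		ring_eq_db(eq, done, 1);	/* ack entries and rearm */
		return 1;			/* IRQ_HANDLED */
	}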
+1319,10 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,  	resid = csol_cqe->res_cnt;  	if (!task->sc) { -		if (io_task->scsi_cmnd) +		if (io_task->scsi_cmnd) {  			scsi_dma_unmap(io_task->scsi_cmnd); +			io_task->scsi_cmnd = NULL; +		}  		return;  	} @@ -1308,6 +1359,7 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,  		conn->rxdata_octets += resid;  unmap:  	scsi_dma_unmap(io_task->scsi_cmnd); +	io_task->scsi_cmnd = NULL;  	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);  } @@ -1496,7 +1548,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,  	pwrb = pwrb_handle->pwrb;  	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->back_lock);  	switch (type) {  	case HWH_TYPE_IO:  	case HWH_TYPE_IO_RD: @@ -1535,7 +1587,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,  		break;  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->back_lock);  }  static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context @@ -1613,8 +1665,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba,  	WARN_ON(!pasync_handle); -	pasync_handle->cri = -			BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); +	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID( +			     beiscsi_conn->beiscsi_conn_cid);  	pasync_handle->is_header = is_header;  	pasync_handle->buffer_len = dpl;  	*pcq_index = index; @@ -1674,18 +1726,13 @@ hwi_update_async_writables(struct beiscsi_hba *phba,  }  static void hwi_free_async_msg(struct beiscsi_hba *phba, -				       unsigned int cri) +			       struct hwi_async_pdu_context *pasync_ctx, +			       unsigned int cri)  { -	struct hwi_controller *phwi_ctrlr; -	struct hwi_async_pdu_context *pasync_ctx;  	struct async_pdu_handle *pasync_handle, *tmp_handle;  	struct list_head *plist; -	phwi_ctrlr = phba->phwi_ctrlr; -	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); -  	plist  = &pasync_ctx->async_entry[cri].wait_queue.list; -  	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {  		list_del(&pasync_handle->link); @@ -1720,7 +1767,7 @@ hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,  }  static void hwi_post_async_buffers(struct beiscsi_hba *phba, -				   unsigned int is_header) +				    unsigned int is_header, uint8_t ulp_num)  {  	struct hwi_controller *phwi_ctrlr;  	struct hwi_async_pdu_context *pasync_ctx; @@ -1728,13 +1775,13 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,  	struct list_head *pfree_link, *pbusy_list;  	struct phys_addr *pasync_sge;  	unsigned int ring_id, num_entries; -	unsigned int host_write_num; +	unsigned int host_write_num, doorbell_offset;  	unsigned int writables;  	unsigned int i = 0;  	u32 doorbell = 0;  	phwi_ctrlr = phba->phwi_ctrlr; -	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); +	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);  	num_entries = pasync_ctx->num_entries;  	if (is_header) { @@ -1742,13 +1789,17 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,  				pasync_ctx->async_header.free_entries);  		pfree_link = pasync_ctx->async_header.free_list.next;  		host_write_num = pasync_ctx->async_header.host_write_ptr; -		ring_id = phwi_ctrlr->default_pdu_hdr.id; +		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; +		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 
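The small be_complete_io() change in this region deserves a callout: after scsi_dma_unmap() the cached io_task->scsi_cmnd pointer is cleared so a second completion, or the abort path, cannot unmap the same command twice. The idiom, reduced (scsi_dma_unmap() is the kernel API; the struct is simplified):

	struct scsi_cmnd;
	extern void scsi_dma_unmap(struct scsi_cmnd *cmd);

	struct io_task {
		struct scsi_cmnd *scsi_cmnd;
	};

	/* Unmap once; clearing the pointer makes a repeat call a no-op. */
	static void io_task_unmap(struct io_task *io)
	{
		if (io->scsi_cmnd) {
			scsi_dma_unmap(io->scsi_cmnd);
			io->scsi_cmnd = NULL;
		}
	}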
+				  doorbell_offset;  	} else {  		writables = min(pasync_ctx->async_data.writables,  				pasync_ctx->async_data.free_entries);  		pfree_link = pasync_ctx->async_data.free_list.next;  		host_write_num = pasync_ctx->async_data.host_write_ptr; -		ring_id = phwi_ctrlr->default_pdu_data.id; +		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; +		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. +				  doorbell_offset;  	}  	writables = (writables / 8) * 8; @@ -1796,7 +1847,7 @@ static void hwi_post_async_buffers(struct beiscsi_hba *phba,  		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)  					<< DB_DEF_PDU_CQPROC_SHIFT; -		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET); +		iowrite32(doorbell, phba->db_va + doorbell_offset);  	}  } @@ -1808,9 +1859,13 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,  	struct hwi_async_pdu_context *pasync_ctx;  	struct async_pdu_handle *pasync_handle = NULL;  	unsigned int cq_index = -1; +	uint16_t cri_index = BE_GET_CRI_FROM_CID( +			     beiscsi_conn->beiscsi_conn_cid);  	phwi_ctrlr = phba->phwi_ctrlr; -	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); +	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, +		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, +		     cri_index));  	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,  					     pdpdu_cqe, &cq_index); @@ -1819,8 +1874,10 @@ static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,  		hwi_update_async_writables(phba, pasync_ctx,  					   pasync_handle->is_header, cq_index); -	hwi_free_async_msg(phba, pasync_handle->cri); -	hwi_post_async_buffers(phba, pasync_handle->is_header); +	hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri); +	hwi_post_async_buffers(phba, pasync_handle->is_header, +			       BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, +			       cri_index));  }  static unsigned int @@ -1859,7 +1916,7 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,  					    phdr, hdr_len, pfirst_buffer,  					    offset); -	hwi_free_async_msg(phba, cri); +	hwi_free_async_msg(phba, pasync_ctx, cri);  	return 0;  } @@ -1875,13 +1932,16 @@ hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,  	struct pdu_base *ppdu;  	phwi_ctrlr = phba->phwi_ctrlr; -	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); +	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, +		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, +		     BE_GET_CRI_FROM_CID(beiscsi_conn-> +				 beiscsi_conn_cid)));  	list_del(&pasync_handle->link);  	if (pasync_handle->is_header) {  		pasync_ctx->async_header.busy_entries--;  		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { -			hwi_free_async_msg(phba, cri); +			hwi_free_async_msg(phba, pasync_ctx, cri);  			BUG();  		} @@ -1936,9 +1996,14 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,  	struct hwi_async_pdu_context *pasync_ctx;  	struct async_pdu_handle *pasync_handle = NULL;  	unsigned int cq_index = -1; +	uint16_t cri_index = BE_GET_CRI_FROM_CID( +			     beiscsi_conn->beiscsi_conn_cid);  	phwi_ctrlr = phba->phwi_ctrlr; -	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); +	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, +		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, +		     cri_index)); +  	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,  					     pdpdu_cqe, &cq_index); @@ -1947,7 +2012,9 @@ static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,  					   pasync_handle->is_header, cq_index);  	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); -	
hwi_post_async_buffers(phba, pasync_handle->is_header); +	hwi_post_async_buffers(phba, pasync_handle->is_header, +			       BEISCSI_GET_ULP_FROM_CRI( +			       phwi_ctrlr, cri_index));  }  static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba) @@ -2072,8 +2139,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)  				    "BM_%d : Received %s[%d] on CID : %d\n",  				    cqe_desc[code], code, cid); +			spin_lock_bh(&phba->async_pdu_lock);  			hwi_process_default_pdu_ring(beiscsi_conn, phba,  					     (struct i_t_dpdu_cqe *)sol); +			spin_unlock_bh(&phba->async_pdu_lock);  			break;  		case UNSOL_DATA_NOTIFY:  			beiscsi_log(phba, KERN_INFO, @@ -2081,8 +2150,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)  				    "BM_%d : Received %s[%d] on CID : %d\n",  				    cqe_desc[code], code, cid); +			spin_lock_bh(&phba->async_pdu_lock);  			hwi_process_default_pdu_ring(beiscsi_conn, phba,  					     (struct i_t_dpdu_cqe *)sol); +			spin_unlock_bh(&phba->async_pdu_lock);  			break;  		case CXN_INVALIDATE_INDEX_NOTIFY:  		case CMD_INVALIDATED_NOTIFY: @@ -2110,8 +2181,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)  				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,  				    "BM_%d :  Dropping %s[%d] on DPDU ring on CID : %d\n",  				    cqe_desc[code], code, cid); +			spin_lock_bh(&phba->async_pdu_lock);  			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,  					     (struct i_t_dpdu_cqe *) sol); +			spin_unlock_bh(&phba->async_pdu_lock);  			break;  		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:  		case CXN_KILLED_BURST_LEN_MISMATCH: @@ -2198,6 +2271,7 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)  	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);  	ret = beiscsi_process_cq(pbe_eq); +	pbe_eq->cq_count += ret;  	if (ret < budget) {  		phba = pbe_eq->phba;  		blk_iopoll_complete(iop); @@ -2476,26 +2550,19 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)  	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);  } +/** + * beiscsi_find_mem_req()- Find mem needed + * @phba: ptr to HBA struct + **/  static void beiscsi_find_mem_req(struct beiscsi_hba *phba)  { +	uint8_t mem_descr_index, ulp_num;  	unsigned int num_cq_pages, num_async_pdu_buf_pages;  	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;  	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;  	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \  				      sizeof(struct sol_cqe)); -	num_async_pdu_buf_pages = -			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ -				       phba->params.defpdu_hdr_sz); -	num_async_pdu_buf_sgl_pages = -			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ -				       sizeof(struct phys_addr)); -	num_async_pdu_data_pages = -			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ -				       phba->params.defpdu_data_sz); -	num_async_pdu_data_sgl_pages = -			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ -				       sizeof(struct phys_addr));  	phba->params.hwi_ws_sz = sizeof(struct hwi_controller); @@ -2517,24 +2584,79 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)  		phba->params.icds_per_ctrl;  	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *  		phba->params.num_sge_per_io * phba->params.icds_per_ctrl; - -	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = -		num_async_pdu_buf_pages * PAGE_SIZE; -	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = -		num_async_pdu_data_pages * PAGE_SIZE; -	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = -		
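A lookup chain repeats throughout these hunks: a connection's CID is first mapped to its WRB context index (CRI), the WRB context records which ULP the connection landed on, and that selects the per-ULP async PDU context and doorbell offset. Condensed, with cid - cid_base as a simplified stand-in for the driver's BE_GET_CRI_FROM_CID mapping:

	#include <stdint.h>

	/* Simplified: one wrb_context per connection (CRI). */
	struct wrb_ctx  { uint16_t cid; uint8_t ulp_num; };
	struct hba_ctrl { struct wrb_ctx *wrb_context; void *pasync_ctx[2]; };

	static void *async_ctx_for_cid(struct hba_ctrl *c, uint16_t cid,
				       uint16_t cid_base)
	{
		uint16_t cri = cid - cid_base;	/* BE_GET_CRI_FROM_CID */
		uint8_t  ulp = c->wrb_context[cri].ulp_num;

		return c->pasync_ctx[ulp];	/* HWI_GET_ASYNC_PDU_CTX */
	}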
num_async_pdu_buf_sgl_pages * PAGE_SIZE; -	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = -		num_async_pdu_data_sgl_pages * PAGE_SIZE; -	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = -		phba->params.asyncpdus_per_ctrl * -		sizeof(struct async_pdu_handle); -	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = -		phba->params.asyncpdus_per_ctrl * -		sizeof(struct async_pdu_handle); -	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = -		sizeof(struct hwi_async_pdu_context) + -		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + +			num_async_pdu_buf_sgl_pages = +				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( +					       phba, ulp_num) * +					       sizeof(struct phys_addr)); + +			num_async_pdu_buf_pages = +				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( +					       phba, ulp_num) * +					       phba->params.defpdu_hdr_sz); + +			num_async_pdu_data_pages = +				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( +					       phba, ulp_num) * +					       phba->params.defpdu_data_sz); + +			num_async_pdu_data_sgl_pages = +				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( +					       phba, ulp_num) * +					       sizeof(struct phys_addr)); + +			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					BEISCSI_GET_CID_COUNT(phba, ulp_num) * +					BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; + +			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					  num_async_pdu_buf_pages * +					  PAGE_SIZE; + +			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					  num_async_pdu_data_pages * +					  PAGE_SIZE; + +			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					  num_async_pdu_buf_sgl_pages * +					  PAGE_SIZE; + +			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					  num_async_pdu_data_sgl_pages * +					  PAGE_SIZE; + +			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					  BEISCSI_GET_CID_COUNT(phba, ulp_num) * +					  sizeof(struct async_pdu_handle); + +			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					  BEISCSI_GET_CID_COUNT(phba, ulp_num) * +					  sizeof(struct async_pdu_handle); + +			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + +					  (ulp_num * MEM_DESCR_OFFSET)); +			phba->mem_req[mem_descr_index] = +					  sizeof(struct hwi_async_pdu_context) + +					 (BEISCSI_GET_CID_COUNT(phba, ulp_num) * +					  sizeof(struct hwi_async_entry)); +		} +	}  }  static int beiscsi_alloc_mem(struct beiscsi_hba *phba) @@ -2576,6 +2698,12 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)  	mem_descr = phba->init_mem;  	for (i = 0; i < SE_MEM_MAX; i++) { +		if (!phba->mem_req[i]) { +			mem_descr->mem_array = NULL; +			mem_descr++; +			continue; +		} +  		j = 0;  		mem_arr = mem_arr_orig;  		alloc_size = phba->mem_req[i]; @@ -2697,7 +2825,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)  	/* Allocate memory for WRBQ */  	phwi_ctxt = phwi_ctrlr->phwi_ctxt;  	phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * -				     
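beiscsi_find_mem_req() above addresses one flat mem_req[] table for both ULPs: each per-ULP region sits at its ULP0 base index plus ulp_num * MEM_DESCR_OFFSET, and beiscsi_alloc_mem() later skips any slot whose size stayed zero (unsupported ULP). The index scheme in miniature (indices and stride illustrative):

	#include <stdint.h>

	enum {				/* illustrative base indices */
		MEM_TEMPLATE_HDR_ULP0 = 0,
		MEM_ASYNC_HEADER_BUF_ULP0,
		MEM_ASYNC_DATA_BUF_ULP0,
	};
	#define MEM_DESCR_STRIDE 8	/* ULP0-to-ULP1 block stride */

	static unsigned int mem_idx(unsigned int base_ulp0, uint8_t ulp_num)
	{
		return base_ulp0 + ulp_num * MEM_DESCR_STRIDE;
	}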
phba->fw_config.iscsi_cid_count, +				     phba->params.cxns_per_ctrl,  				     GFP_KERNEL);  	if (!phwi_ctxt->be_wrbq) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -2779,6 +2907,7 @@ init_wrb_hndl_failed:  static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)  { +	uint8_t ulp_num;  	struct hwi_controller *phwi_ctrlr;  	struct hba_parameters *p = &phba->params;  	struct hwi_async_pdu_context *pasync_ctx; @@ -2786,155 +2915,150 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)  	unsigned int index, idx, num_per_mem, num_async_data;  	struct be_mem_descriptor *mem_descr; -	mem_descr = (struct be_mem_descriptor *)phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT; - -	phwi_ctrlr = phba->phwi_ctrlr; -	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *) +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + +				     (ulp_num * MEM_DESCR_OFFSET)); + +			phwi_ctrlr = phba->phwi_ctrlr; +			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = +				(struct hwi_async_pdu_context *) +				 mem_descr->mem_array[0].virtual_address; + +			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; +			memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + +			pasync_ctx->async_entry = +					(struct hwi_async_entry *) +					((long unsigned int)pasync_ctx + +					sizeof(struct hwi_async_pdu_context)); + +			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, +						  ulp_num); +			pasync_ctx->buffer_size = p->defpdu_hdr_sz; + +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + +				(ulp_num * MEM_DESCR_OFFSET); +			if (mem_descr->mem_array[0].virtual_address) { +				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +					    "BM_%d : hwi_init_async_pdu_ctx" +					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", +					    ulp_num, +					    mem_descr->mem_array[0]. 
+					    virtual_address); +			} else +				beiscsi_log(phba, KERN_WARNING, +					    BEISCSI_LOG_INIT, +					    "BM_%d : No Virtual address for ULP : %d\n", +					    ulp_num); + +			pasync_ctx->async_header.va_base =  				mem_descr->mem_array[0].virtual_address; -	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; -	memset(pasync_ctx, 0, sizeof(*pasync_ctx)); - -	pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * -					  phba->fw_config.iscsi_cid_count, -					  GFP_KERNEL); -	if (!pasync_ctx->async_entry) { -		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n"); -		return -ENOMEM; -	} - -	pasync_ctx->num_entries = p->asyncpdus_per_ctrl; -	pasync_ctx->buffer_size = p->defpdu_hdr_sz; - -	mem_descr = (struct be_mem_descriptor *)phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_HEADER_BUF; -	if (mem_descr->mem_array[0].virtual_address) { -		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -			    "BM_%d : hwi_init_async_pdu_ctx" -			    " HWI_MEM_ASYNC_HEADER_BUF va=%p\n", -			    mem_descr->mem_array[0].virtual_address); -	} else -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, -			    "BM_%d : No Virtual address\n"); - -	pasync_ctx->async_header.va_base = -			mem_descr->mem_array[0].virtual_address; - -	pasync_ctx->async_header.pa_base.u.a64.address = -			mem_descr->mem_array[0].bus_address.u.a64.address; - -	mem_descr = (struct be_mem_descriptor *)phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_HEADER_RING; -	if (mem_descr->mem_array[0].virtual_address) { -		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -			    "BM_%d : hwi_init_async_pdu_ctx" -			    " HWI_MEM_ASYNC_HEADER_RING va=%p\n", -			    mem_descr->mem_array[0].virtual_address); -	} else -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, -			    "BM_%d : No Virtual address\n"); - -	pasync_ctx->async_header.ring_base = -			mem_descr->mem_array[0].virtual_address; - -	mem_descr = (struct be_mem_descriptor *)phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE; -	if (mem_descr->mem_array[0].virtual_address) { -		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -			    "BM_%d : hwi_init_async_pdu_ctx" -			    " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n", -			    mem_descr->mem_array[0].virtual_address); -	} else -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, -			    "BM_%d : No Virtual address\n"); -	pasync_ctx->async_header.handle_base = -			mem_descr->mem_array[0].virtual_address; -	pasync_ctx->async_header.writables = 0; -	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); - - -	mem_descr = (struct be_mem_descriptor *)phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_DATA_RING; -	if (mem_descr->mem_array[0].virtual_address) { -		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -			    "BM_%d : hwi_init_async_pdu_ctx" -			    " HWI_MEM_ASYNC_DATA_RING va=%p\n", -			    mem_descr->mem_array[0].virtual_address); -	} else -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, -			    "BM_%d : No Virtual address\n"); - -	pasync_ctx->async_data.ring_base = -			mem_descr->mem_array[0].virtual_address; - -	mem_descr = (struct be_mem_descriptor *)phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; -	if (!mem_descr->mem_array[0].virtual_address) -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, -			    "BM_%d : No Virtual address\n"); +			pasync_ctx->async_header.pa_base.u.a64.address = +				mem_descr->mem_array[0]. 
+				bus_address.u.a64.address; -	pasync_ctx->async_data.handle_base = -			mem_descr->mem_array[0].virtual_address; -	pasync_ctx->async_data.writables = 0; -	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + +				     (ulp_num * MEM_DESCR_OFFSET); +			if (mem_descr->mem_array[0].virtual_address) { +				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +					    "BM_%d : hwi_init_async_pdu_ctx" +					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", +					    ulp_num, +					    mem_descr->mem_array[0]. +					    virtual_address); +			} else +				beiscsi_log(phba, KERN_WARNING, +					    BEISCSI_LOG_INIT, +					    "BM_%d : No Virtual address for ULP : %d\n", +					    ulp_num); + +			pasync_ctx->async_header.ring_base = +				mem_descr->mem_array[0].virtual_address; -	pasync_header_h = -		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base; -	pasync_data_h = -		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base; +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + +				     (ulp_num * MEM_DESCR_OFFSET); +			if (mem_descr->mem_array[0].virtual_address) { +				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +					    "BM_%d : hwi_init_async_pdu_ctx" +					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", +					    ulp_num, +					    mem_descr->mem_array[0]. +					    virtual_address); +			} else +				beiscsi_log(phba, KERN_WARNING, +					    BEISCSI_LOG_INIT, +					    "BM_%d : No Virtual address for ULP : %d\n", +					    ulp_num); + +			pasync_ctx->async_header.handle_base = +				mem_descr->mem_array[0].virtual_address; +			pasync_ctx->async_header.writables = 0; +			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); + +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + +				     (ulp_num * MEM_DESCR_OFFSET); +			if (mem_descr->mem_array[0].virtual_address) { +				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +					    "BM_%d : hwi_init_async_pdu_ctx" +					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", +					    ulp_num, +					    mem_descr->mem_array[0]. 
+					    virtual_address); +			} else +				beiscsi_log(phba, KERN_WARNING, +					    BEISCSI_LOG_INIT, +					    "BM_%d : No Virtual address for ULP : %d\n", +					    ulp_num); + +			pasync_ctx->async_data.ring_base = +				mem_descr->mem_array[0].virtual_address; -	mem_descr = (struct be_mem_descriptor *)phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_DATA_BUF; -	if (mem_descr->mem_array[0].virtual_address) { -		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -			    "BM_%d : hwi_init_async_pdu_ctx" -			    " HWI_MEM_ASYNC_DATA_BUF va=%p\n", -			    mem_descr->mem_array[0].virtual_address); -	} else -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, -			    "BM_%d : No Virtual address\n"); +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + +				     (ulp_num * MEM_DESCR_OFFSET); +			if (!mem_descr->mem_array[0].virtual_address) +				beiscsi_log(phba, KERN_WARNING, +					    BEISCSI_LOG_INIT, +					    "BM_%d : No Virtual address for ULP : %d\n", +					    ulp_num); -	idx = 0; -	pasync_ctx->async_data.va_base = -			mem_descr->mem_array[idx].virtual_address; -	pasync_ctx->async_data.pa_base.u.a64.address = -			mem_descr->mem_array[idx].bus_address.u.a64.address; - -	num_async_data = ((mem_descr->mem_array[idx].size) / -				phba->params.defpdu_data_sz); -	num_per_mem = 0; - -	for (index = 0; index < p->asyncpdus_per_ctrl; index++) { -		pasync_header_h->cri = -1; -		pasync_header_h->index = (char)index; -		INIT_LIST_HEAD(&pasync_header_h->link); -		pasync_header_h->pbuffer = -			(void *)((unsigned long) -			(pasync_ctx->async_header.va_base) + -			(p->defpdu_hdr_sz * index)); - -		pasync_header_h->pa.u.a64.address = -			pasync_ctx->async_header.pa_base.u.a64.address + -			(p->defpdu_hdr_sz * index); - -		list_add_tail(&pasync_header_h->link, -				&pasync_ctx->async_header.free_list); -		pasync_header_h++; -		pasync_ctx->async_header.free_entries++; -		pasync_ctx->async_header.writables++; - -		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list); -		INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. -			       header_busy_list); -		pasync_data_h->cri = -1; -		pasync_data_h->index = (char)index; -		INIT_LIST_HEAD(&pasync_data_h->link); - -		if (!num_async_data) { -			num_per_mem = 0; -			idx++; +			pasync_ctx->async_data.handle_base = +				mem_descr->mem_array[0].virtual_address; +			pasync_ctx->async_data.writables = 0; +			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); + +			pasync_header_h = +				(struct async_pdu_handle *) +				pasync_ctx->async_header.handle_base; +			pasync_data_h = +				(struct async_pdu_handle *) +				pasync_ctx->async_data.handle_base; + +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + +				     (ulp_num * MEM_DESCR_OFFSET); +			if (mem_descr->mem_array[0].virtual_address) { +				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +					    "BM_%d : hwi_init_async_pdu_ctx" +					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", +					    ulp_num, +					    mem_descr->mem_array[0]. 
+					    virtual_address); +			} else +				beiscsi_log(phba, KERN_WARNING, +					    BEISCSI_LOG_INIT, +					    "BM_%d : No Virtual address for ULP : %d\n", +					    ulp_num); + +			idx = 0;  			pasync_ctx->async_data.va_base =  				mem_descr->mem_array[idx].virtual_address;  			pasync_ctx->async_data.pa_base.u.a64.address = @@ -2943,32 +3067,83 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)  			num_async_data = ((mem_descr->mem_array[idx].size) /  					phba->params.defpdu_data_sz); -		} -		pasync_data_h->pbuffer = -			(void *)((unsigned long) -			(pasync_ctx->async_data.va_base) + -			(p->defpdu_data_sz * num_per_mem)); - -		pasync_data_h->pa.u.a64.address = -		    pasync_ctx->async_data.pa_base.u.a64.address + -		    (p->defpdu_data_sz * num_per_mem); -		num_per_mem++; -		num_async_data--; +			num_per_mem = 0; -		list_add_tail(&pasync_data_h->link, -			      &pasync_ctx->async_data.free_list); -		pasync_data_h++; -		pasync_ctx->async_data.free_entries++; -		pasync_ctx->async_data.writables++; +			for (index = 0;	index < BEISCSI_GET_CID_COUNT +					(phba, ulp_num); index++) { +				pasync_header_h->cri = -1; +				pasync_header_h->index = (char)index; +				INIT_LIST_HEAD(&pasync_header_h->link); +				pasync_header_h->pbuffer = +					(void *)((unsigned long) +						 (pasync_ctx-> +						  async_header.va_base) + +						 (p->defpdu_hdr_sz * index)); + +				pasync_header_h->pa.u.a64.address = +					pasync_ctx->async_header.pa_base.u.a64. +					address + (p->defpdu_hdr_sz * index); + +				list_add_tail(&pasync_header_h->link, +					      &pasync_ctx->async_header. +					      free_list); +				pasync_header_h++; +				pasync_ctx->async_header.free_entries++; +				pasync_ctx->async_header.writables++; + +				INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. +					       wait_queue.list); +				INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. +					       header_busy_list); +				pasync_data_h->cri = -1; +				pasync_data_h->index = (char)index; +				INIT_LIST_HEAD(&pasync_data_h->link); + +				if (!num_async_data) { +					num_per_mem = 0; +					idx++; +					pasync_ctx->async_data.va_base = +						mem_descr->mem_array[idx]. +						virtual_address; +					pasync_ctx->async_data.pa_base.u. +						a64.address = +						mem_descr->mem_array[idx]. +						bus_address.u.a64.address; +					num_async_data = +						((mem_descr->mem_array[idx]. +						  size) / +						 phba->params.defpdu_data_sz); +				} +				pasync_data_h->pbuffer = +					(void *)((unsigned long) +					(pasync_ctx->async_data.va_base) + +					(p->defpdu_data_sz * num_per_mem)); + +				pasync_data_h->pa.u.a64.address = +					pasync_ctx->async_data.pa_base.u.a64. +					address + (p->defpdu_data_sz * +					num_per_mem); +				num_per_mem++; +				num_async_data--; + +				list_add_tail(&pasync_data_h->link, +					      &pasync_ctx->async_data. +					      free_list); +				pasync_data_h++; +				pasync_ctx->async_data.free_entries++; +				pasync_ctx->async_data.writables++; + +				INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
+					       data_busy_list); +			} -		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); +			pasync_ctx->async_header.host_write_ptr = 0; +			pasync_ctx->async_header.ep_read_ptr = -1; +			pasync_ctx->async_data.host_write_ptr = 0; +			pasync_ctx->async_data.ep_read_ptr = -1; +		}  	} -	pasync_ctx->async_header.host_write_ptr = 0; -	pasync_ctx->async_header.ep_read_ptr = -1; -	pasync_ctx->async_data.host_write_ptr = 0; -	pasync_ctx->async_data.ep_read_ptr = -1; -  	return 0;  } @@ -3164,7 +3339,7 @@ static int  beiscsi_create_def_hdr(struct beiscsi_hba *phba,  		       struct hwi_context_memory *phwi_context,  		       struct hwi_controller *phwi_ctrlr, -		       unsigned int def_pdu_ring_sz) +		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)  {  	unsigned int idx;  	int ret; @@ -3174,36 +3349,42 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,  	void *dq_vaddress;  	idx = 0; -	dq = &phwi_context->be_def_hdrq; +	dq = &phwi_context->be_def_hdrq[ulp_num];  	cq = &phwi_context->be_cq[0];  	mem = &dq->dma_mem;  	mem_descr = phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_HEADER_RING; +	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + +		    (ulp_num * MEM_DESCR_OFFSET);  	dq_vaddress = mem_descr->mem_array[idx].virtual_address;  	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /  			    sizeof(struct phys_addr),  			    sizeof(struct phys_addr), dq_vaddress);  	if (ret) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); +			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", +			    ulp_num); +  		return ret;  	}  	mem->dma = (unsigned long)mem_descr->mem_array[idx].  				  bus_address.u.a64.address;  	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,  					      def_pdu_ring_sz, -					      phba->params.defpdu_hdr_sz); +					      phba->params.defpdu_hdr_sz, +					      BEISCSI_DEFQ_HDR, ulp_num);  	if (ret) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n"); +			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", +			    ulp_num); +  		return ret;  	} -	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; -	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -		    "BM_%d : iscsi def pdu id is %d\n", -		    phwi_context->be_def_hdrq.id); -	hwi_post_async_buffers(phba, 1); +	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", +		    ulp_num, +		    phwi_context->be_def_hdrq[ulp_num].id); +	hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);  	return 0;  } @@ -3211,7 +3392,7 @@ static int  beiscsi_create_def_data(struct beiscsi_hba *phba,  			struct hwi_context_memory *phwi_context,  			struct hwi_controller *phwi_ctrlr, -			unsigned int def_pdu_ring_sz) +			unsigned int def_pdu_ring_sz, uint8_t ulp_num)  {  	unsigned int idx;  	int ret; @@ -3221,43 +3402,86 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,  	void *dq_vaddress;  	idx = 0; -	dataq = &phwi_context->be_def_dataq; +	dataq = &phwi_context->be_def_dataq[ulp_num];  	cq = &phwi_context->be_cq[0];  	mem = &dataq->dma_mem;  	mem_descr = phba->init_mem; -	mem_descr += HWI_MEM_ASYNC_DATA_RING; +	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + +		    (ulp_num * MEM_DESCR_OFFSET);  	dq_vaddress = mem_descr->mem_array[idx].virtual_address;  	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /  			    sizeof(struct phys_addr),  			    sizeof(struct phys_addr), dq_vaddress);  	if 
(ret) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : be_fill_queue Failed for DEF PDU DATA\n"); +			    "BM_%d : be_fill_queue Failed for DEF PDU " +			    "DATA on ULP : %d\n", +			    ulp_num); +  		return ret;  	}  	mem->dma = (unsigned long)mem_descr->mem_array[idx].  				  bus_address.u.a64.address;  	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,  					      def_pdu_ring_sz, -					      phba->params.defpdu_data_sz); +					      phba->params.defpdu_data_sz, +					      BEISCSI_DEFQ_DATA, ulp_num);  	if (ret) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,  			    "BM_%d be_cmd_create_default_pdu_queue" -			    " Failed for DEF PDU DATA\n"); +			    " Failed for DEF PDU DATA on ULP : %d\n", +			    ulp_num);  		return ret;  	} -	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; +  	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -		    "BM_%d : iscsi def data id is %d\n", -		    phwi_context->be_def_dataq.id); +		    "BM_%d : iscsi def data id on ULP : %d is  %d\n", +		    ulp_num, +		    phwi_context->be_def_dataq[ulp_num].id); -	hwi_post_async_buffers(phba, 0); +	hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);  	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -		    "BM_%d : DEFAULT PDU DATA RING CREATED\n"); +		    "BM_%d : DEFAULT PDU DATA RING CREATED" +		    "on ULP : %d\n", ulp_num);  	return 0;  } + +static int +beiscsi_post_template_hdr(struct beiscsi_hba *phba) +{ +	struct be_mem_descriptor *mem_descr; +	struct mem_array *pm_arr; +	struct be_dma_mem sgl; +	int status, ulp_num; + +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { +			mem_descr = (struct be_mem_descriptor *)phba->init_mem; +			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + +				    (ulp_num * MEM_DESCR_OFFSET); +			pm_arr = mem_descr->mem_array; + +			hwi_build_be_sgl_arr(phba, pm_arr, &sgl); +			status = be_cmd_iscsi_post_template_hdr( +				 &phba->ctrl, &sgl); + +			if (status != 0) { +				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +					    "BM_%d : Post Template HDR Failed for" +					    "ULP_%d\n", ulp_num); +				return status; +			} + +			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +				    "BM_%d : Template HDR Pages Posted for" +				    "ULP_%d\n", ulp_num); +		} +	} +	return 0; +} +  static int  beiscsi_post_pages(struct beiscsi_hba *phba)  { @@ -3265,14 +3489,18 @@ beiscsi_post_pages(struct beiscsi_hba *phba)  	struct mem_array *pm_arr;  	unsigned int page_offset, i;  	struct be_dma_mem sgl; -	int status; +	int status, ulp_num = 0;  	mem_descr = phba->init_mem;  	mem_descr += HWI_MEM_SGE;  	pm_arr = mem_descr->mem_array; +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) +			break; +  	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * -			phba->fw_config.iscsi_icd_start) / PAGE_SIZE; +			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;  	for (i = 0; i < mem_descr->num_elements; i++) {  		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);  		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, @@ -3324,13 +3552,15 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,  {  	unsigned int wrb_mem_index, offset, size, num_wrb_rings;  	u64 pa_addr_lo; -	unsigned int idx, num, i; +	unsigned int idx, num, i, ulp_num;  	struct mem_array *pwrb_arr;  	void *wrb_vaddr;  	struct be_dma_mem sgl;  	struct be_mem_descriptor *mem_descr;  	struct hwi_wrb_context *pwrb_context;  	int status; +	uint8_t 
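beiscsi_post_template_hdr() above introduces the loop shape used all over this patch: fw_config.ulp_supported is a bitmap, and per-ULP work runs only for set bits. Equivalent skeleton using test_bit() as in the patch (the kernel's for_each_set_bit() would also do):

	unsigned long ulp_supported = phba->fw_config.ulp_supported;
	int ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (!test_bit(ulp_num, &ulp_supported))
			continue;	/* ULP not configured by firmware */
		/* ... per-ULP setup goes here ... */
	}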
ulp_count = 0, ulp_base_num = 0; +	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };  	idx = 0;  	mem_descr = phba->init_mem; @@ -3374,14 +3604,37 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,  			num_wrb_rings--;  		}  	} + +	/* Get the ULP Count */ +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { +			ulp_count++; +			ulp_base_num = ulp_num; +			cid_count_ulp[ulp_num] = +				BEISCSI_GET_CID_COUNT(phba, ulp_num); +		} +  	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {  		wrb_mem_index = 0;  		offset = 0;  		size = 0; +		if (ulp_count > 1) { +			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; + +			if (!cid_count_ulp[ulp_base_num]) +				ulp_base_num = (ulp_base_num + 1) % +						BEISCSI_ULP_COUNT; + +			cid_count_ulp[ulp_base_num]--; +		} + +  		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);  		status = be_cmd_wrbq_create(&phba->ctrl, &sgl, -					    &phwi_context->be_wrbq[i]); +					    &phwi_context->be_wrbq[i], +					    &phwi_ctrlr->wrb_context[i], +					    ulp_base_num);  		if (status != 0) {  			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,  				    "BM_%d : wrbq create failed."); @@ -3389,7 +3642,6 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba,  			return status;  		}  		pwrb_context = &phwi_ctrlr->wrb_context[i]; -		pwrb_context->cid = phwi_context->be_wrbq[i].id;  		BE_SET_CID_TO_CRI(i, pwrb_context->cid);  	}  	kfree(pwrb_arr); @@ -3433,10 +3685,13 @@ static void hwi_cleanup(struct beiscsi_hba *phba)  	struct hwi_controller *phwi_ctrlr;  	struct hwi_context_memory *phwi_context;  	struct hwi_async_pdu_context *pasync_ctx; -	int i, eq_num; +	int i, eq_for_mcc, ulp_num;  	phwi_ctrlr = phba->phwi_ctrlr;  	phwi_context = phwi_ctrlr->phwi_ctxt; + +	be_cmd_iscsi_remove_template_hdr(ctrl); +  	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {  		q = &phwi_context->be_wrbq[i];  		if (q->created) @@ -3445,13 +3700,20 @@ static void hwi_cleanup(struct beiscsi_hba *phba)  	kfree(phwi_context->be_wrbq);  	free_wrb_handles(phba); -	q = &phwi_context->be_def_hdrq; -	if (q->created) -		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { -	q = &phwi_context->be_def_dataq; -	if (q->created) -		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); +			q = &phwi_context->be_def_hdrq[ulp_num]; +			if (q->created) +				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + +			q = &phwi_context->be_def_dataq[ulp_num]; +			if (q->created) +				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); + +			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; +		} +	}  	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); @@ -3460,19 +3722,17 @@ static void hwi_cleanup(struct beiscsi_hba *phba)  		if (q->created)  			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);  	} + +	be_mcc_queues_destroy(phba);  	if (phba->msix_enabled) -		eq_num = 1; +		eq_for_mcc = 1;  	else -		eq_num = 0; -	for (i = 0; i < (phba->num_cpus + eq_num); i++) { +		eq_for_mcc = 0; +	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {  		q = &phwi_context->be_eq[i].q;  		if (q->created)  			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);  	} -	be_mcc_queues_destroy(phba); - -	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; -	kfree(pasync_ctx->async_entry);  	be_cmd_fw_uninit(ctrl);  } @@ -3538,8 +3798,19 @@ static void find_num_cpus(struct beiscsi_hba *phba)  				  BEISCSI_MAX_NUM_CPUS : num_cpus;  		break;  	case BE_GEN4: -		phba->num_cpus = (num_cpus > 
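beiscsi_create_wrb_rings() above spreads connections across ULPs: when more than one ULP is live, the base ULP advances round-robin per WRBQ, skips once past a ULP whose CID budget is exhausted (a single skip suffices with two ULPs), and decrements the chosen ULP's remaining count. Standalone model of that placement:

	#include <stdint.h>

	#define ULP_COUNT 2

	/* Pick the ULP for the next connection, bounded by remaining CIDs. */
	static int next_ulp(uint16_t left[ULP_COUNT], int *cursor)
	{
		*cursor = (*cursor + 1) % ULP_COUNT;
		if (!left[*cursor])		/* this ULP is out of CIDs */
			*cursor = (*cursor + 1) % ULP_COUNT;
		left[*cursor]--;
		return *cursor;
	}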
OC_SKH_MAX_NUM_CPUS) ? -				  OC_SKH_MAX_NUM_CPUS : num_cpus; +		/* +		 * If eqid_count == 1 fall back to +		 * INTX mechanism +		 **/ +		if (phba->fw_config.eqid_count == 1) { +			enable_msix = 0; +			phba->num_cpus = 1; +			return; +		} + +		phba->num_cpus = +			(num_cpus > (phba->fw_config.eqid_count - 1)) ? +			(phba->fw_config.eqid_count - 1) : num_cpus;  		break;  	default:  		phba->num_cpus = 1; @@ -3552,15 +3823,13 @@ static int hwi_init_port(struct beiscsi_hba *phba)  	struct hwi_context_memory *phwi_context;  	unsigned int def_pdu_ring_sz;  	struct be_ctrl_info *ctrl = &phba->ctrl; -	int status; +	int status, ulp_num; -	def_pdu_ring_sz = -		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);  	phwi_ctrlr = phba->phwi_ctrlr;  	phwi_context = phwi_ctrlr->phwi_ctxt; -	phwi_context->max_eqd = 0; +	phwi_context->max_eqd = 128;  	phwi_context->min_eqd = 0; -	phwi_context->cur_eqd = 64; +	phwi_context->cur_eqd = 0;  	be_cmd_fw_initialize(&phba->ctrl);  	status = beiscsi_create_eqs(phba, phwi_context); @@ -3588,27 +3857,48 @@ static int hwi_init_port(struct beiscsi_hba *phba)  		goto error;  	} -	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, -					def_pdu_ring_sz); -	if (status != 0) { -		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : Default Header not created\n"); -		goto error; +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + +			def_pdu_ring_sz = +				BEISCSI_GET_CID_COUNT(phba, ulp_num) * +				sizeof(struct phys_addr); + +			status = beiscsi_create_def_hdr(phba, phwi_context, +							phwi_ctrlr, +							def_pdu_ring_sz, +							ulp_num); +			if (status != 0) { +				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +					    "BM_%d : Default Header not created for ULP : %d\n", +					    ulp_num); +				goto error; +			} + +			status = beiscsi_create_def_data(phba, phwi_context, +							 phwi_ctrlr, +							 def_pdu_ring_sz, +							 ulp_num); +			if (status != 0) { +				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +					    "BM_%d : Default Data not created for ULP : %d\n", +					    ulp_num); +				goto error; +			} +		}  	} -	status = beiscsi_create_def_data(phba, phwi_context, -					 phwi_ctrlr, def_pdu_ring_sz); +	status = beiscsi_post_pages(phba);  	if (status != 0) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : Default Data not created\n"); +			    "BM_%d : Post SGL Pages Failed\n");  		goto error;  	} -	status = beiscsi_post_pages(phba); +	status = beiscsi_post_template_hdr(phba);  	if (status != 0) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : Post SGL Pages Failed\n"); -		goto error; +			    "BM_%d : Template HDR Posting for CXN Failed\n");  	}  	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr); @@ -3618,6 +3908,26 @@ static int hwi_init_port(struct beiscsi_hba *phba)  		goto error;  	} +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		uint16_t async_arr_idx = 0; + +		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { +			uint16_t cri = 0; +			struct hwi_async_pdu_context *pasync_ctx; + +			pasync_ctx = HWI_GET_ASYNC_PDU_CTX( +				     phwi_ctrlr, ulp_num); +			for (cri = 0; cri < +			     phba->params.cxns_per_ctrl; cri++) { +				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI +					       (phwi_ctrlr, cri)) +					pasync_ctx->cid_to_async_cri_map[ +					phwi_ctrlr->wrb_context[cri].cid] = +					async_arr_idx++; +			} +		} +	} +  	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,  		    "BM_%d : 
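find_num_cpus() above stops assuming a fixed per-chip EQ budget for BE_GEN4: with only one EQ the driver falls back to INTx on a single CPU; otherwise it uses at most eqid_count - 1 vectors, leaving one EQ for the MCC path (MSI-X mode creates num_cpus I/O EQs plus one MCC EQ). The decision in isolation:

	static unsigned int pick_num_cpus(unsigned int online_cpus,
					  unsigned int eqid_count,
					  int *use_msix)
	{
		if (eqid_count == 1) {	/* no spare EQ: INTx, one CPU */
			*use_msix = 0;
			return 1;
		}
		*use_msix = 1;
		return online_cpus > eqid_count - 1 ?
		       eqid_count - 1 : online_cpus;
	}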
hwi_init_port success\n");
 	return 0;
@@ -3682,6 +3992,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
 			  (unsigned long)mem_descr->mem_array[j - 1].
 			  bus_address.u.a64.address);
 		}
+
 		kfree(mem_descr->mem_array);
 		mem_descr++;
 	}
@@ -3721,6 +4032,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 	struct sgl_handle *psgl_handle;
 	struct iscsi_sge *pfrag;
 	unsigned int arr_index, i, idx;
+	unsigned int ulp_icd_start, ulp_num = 0;
 	phba->io_sgl_hndl_avbl = 0;
 	phba->eh_sgl_hndl_avbl = 0;
@@ -3787,6 +4099,12 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 		    "\n BM_%d : mem_descr_sg->num_elements=%d\n",
 		    mem_descr_sg->num_elements);
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
+		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
+			break;
+
+	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
+
 	arr_index = 0;
 	idx = 0;
 	while (idx < mem_descr_sg->num_elements) {
@@ -3805,8 +4123,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
 			pfrag += phba->params.num_sge_per_io;
-			psgl_handle->sgl_index =
-				phba->fw_config.iscsi_icd_start + arr_index++;
+			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
 		}
 		idx++;
 	}
@@ -3819,15 +4136,46 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 {
-	int i;
+	int ret;
+	uint16_t i, ulp_num;
+	struct ulp_cid_info *ptr_cid_info = NULL;
-	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
-				  GFP_KERNEL);
-	if (!phba->cid_array) {
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
-			    "BM_%d : Failed to allocate memory in "
-			    "hba_setup_cid_tbls\n");
-		return -ENOMEM;
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
+					       GFP_KERNEL);
+
+			if (!ptr_cid_info) {
+				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+					    "BM_%d : Failed to allocate memory "
+					    "for ULP_CID_INFO for ULP : %d\n",
+					    ulp_num);
+				ret = -ENOMEM;
+				goto free_memory;
+
+			}
+
+			/* Allocate memory for CID array */
+			ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
+						  BEISCSI_GET_CID_COUNT(phba,
+						  ulp_num), GFP_KERNEL);
+			if (!ptr_cid_info->cid_array) {
+				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+					    "BM_%d : Failed to allocate memory "
+					    "for CID_ARRAY for ULP : %d\n",
+					    ulp_num);
+				kfree(ptr_cid_info);
+				ptr_cid_info = NULL;
+				ret = -ENOMEM;
+
+				goto free_memory;
+			}
+			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
+						   phba, ulp_num);
+
+			/* Save the cid_info_array ptr */
+			phba->cid_array_info[ulp_num] = ptr_cid_info;
+		}
 	}
 	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
 				 phba->params.cxns_per_ctrl, GFP_KERNEL);
@@ -3835,9 +4183,9 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
 			    "BM_%d : Failed to allocate memory in "
 			    "hba_setup_cid_tbls\n");
-		kfree(phba->cid_array);
-		phba->cid_array = NULL;
-		return -ENOMEM;
+		ret = -ENOMEM;
+
+		goto free_memory;
 	}
 	phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
@@ -3847,18 +4195,46 @@
			    "BM_%d : Failed to allocate memory in"  			    "hba_setup_cid_tbls\n"); -		kfree(phba->cid_array);  		kfree(phba->ep_array); -		phba->cid_array = NULL;  		phba->ep_array = NULL; -		return -ENOMEM; +		ret = -ENOMEM; + +		goto free_memory; +	} + +	for (i = 0; i < phba->params.cxns_per_ctrl; i++) { +		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; + +		ptr_cid_info = phba->cid_array_info[ulp_num]; +		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = +			phba->phwi_ctrlr->wrb_context[i].cid; +  	} -	for (i = 0; i < phba->params.cxns_per_ctrl; i++) -		phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { +			ptr_cid_info = phba->cid_array_info[ulp_num]; -	phba->avlbl_cids = phba->params.cxns_per_ctrl; +			ptr_cid_info->cid_alloc = 0; +			ptr_cid_info->cid_free = 0; +		} +	}  	return 0; + +free_memory: +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { +			ptr_cid_info = phba->cid_array_info[ulp_num]; + +			if (ptr_cid_info) { +				kfree(ptr_cid_info->cid_array); +				kfree(ptr_cid_info); +				phba->cid_array_info[ulp_num] = NULL; +			} +		} +	} + +	return ret;  }  static void hwi_enable_intr(struct beiscsi_hba *phba) @@ -3968,12 +4344,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)  		goto boot_freemem;  	} -	ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); +	ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);  	if (ret) {  		beiscsi_log(phba, KERN_ERR,  			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,  			    "BM_%d : beiscsi_get_session_info Failed"); -		goto boot_freemem; + +		if (ret != -EBUSY) +			goto boot_freemem; +		else +			return ret;  	}  	session_resp = nonemb_cmd.va ; @@ -4113,20 +4493,39 @@ static void hwi_purge_eq(struct beiscsi_hba *phba)  static void beiscsi_clean_port(struct beiscsi_hba *phba)  { -	int mgmt_status; - -	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); -	if (mgmt_status) -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, -			    "BM_%d : mgmt_epfw_cleanup FAILED\n"); +	int mgmt_status, ulp_num; +	struct ulp_cid_info *ptr_cid_info = NULL; + +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { +			mgmt_status = mgmt_epfw_cleanup(phba, ulp_num); +			if (mgmt_status) +				beiscsi_log(phba, KERN_WARNING, +					    BEISCSI_LOG_INIT, +					    "BM_%d : mgmt_epfw_cleanup FAILED" +					    " for ULP_%d\n", ulp_num); +		} +	}  	hwi_purge_eq(phba);  	hwi_cleanup(phba);  	kfree(phba->io_sgl_hndl_base);  	kfree(phba->eh_sgl_hndl_base); -	kfree(phba->cid_array);  	kfree(phba->ep_array);  	kfree(phba->conn_table); + +	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { +			ptr_cid_info = phba->cid_array_info[ulp_num]; + +			if (ptr_cid_info) { +				kfree(ptr_cid_info->cid_array); +				kfree(ptr_cid_info); +				phba->cid_array_info[ulp_num] = NULL; +			} +		} +	} +  }  /** @@ -4214,6 +4613,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)  			spin_unlock(&phba->io_sgl_lock);  			io_task->psgl_handle = NULL;  		} + +		if (io_task->scsi_cmnd) { +			scsi_dma_unmap(io_task->scsi_cmnd); +			io_task->scsi_cmnd = NULL; +		}  	} else {  		if (!beiscsi_conn->login_in_progress)  			beiscsi_free_mgmt_task_handles(beiscsi_conn, task); @@ -4235,9 +4639,9 @@ 
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 	 * login/startup related tasks.
 	 */
 	beiscsi_conn->login_in_progress = 0;
-	spin_lock_bh(&session->lock);
+	spin_lock_bh(&session->back_lock);
 	beiscsi_cleanup_task(task);
-	spin_unlock_bh(&session->lock);
+	spin_unlock_bh(&session->back_lock);
 	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
@@ -4255,8 +4659,8 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
 	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
 			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 }
 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
@@ -4481,7 +4885,8 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
 		     DB_DEF_PDU_WRB_INDEX_MASK) <<
 		     DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 	return 0;
 }
@@ -4536,7 +4941,8 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 	return 0;
 }
@@ -4638,7 +5044,8 @@ static int beiscsi_mtask(struct iscsi_task *task)
 	doorbell |= (io_task->pwrb_handle->wrb_index &
 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
-	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+	iowrite32(doorbell, phba->db_va +
+		  beiscsi_conn->doorbell_offset);
 	return 0;
 }
@@ -4663,8 +5070,12 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
 		struct beiscsi_hba *phba = NULL;
 		phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
-		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
-			    "BM_%d : scsi_dma_map Failed\n");
+		beiscsi_log(phba, KERN_ERR,
+			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
+			    "BM_%d : scsi_dma_map Failed "
+			    "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
+			    be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
+			    io_task->libiscsi_itt, scsi_bufflen(sc));
 		return num_sg;
 	}
@@ -4769,10 +5180,12 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
 /*
  * beiscsi_quiesce()- Cleanup Driver resources
  * @phba: Instance Priv structure
+ * @unload_state: Clean or EEH unload state
  *
  * Free the OS and HW resources held by the driver
 **/
-static void beiscsi_quiesce(struct beiscsi_hba *phba)
+static void beiscsi_quiesce(struct beiscsi_hba *phba,
+		uint32_t unload_state)
 {
 	struct hwi_controller *phwi_ctrlr;
 	struct hwi_context_memory *phwi_context;
@@ -4785,28 +5198,36 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
 	if (phba->msix_enabled) {
 		for (i = 0; i <= phba->num_cpus; i++) {
 			msix_vec = phba->msix_entries[i].vector;
+			synchronize_irq(msix_vec);
 			free_irq(msix_vec, &phwi_context->be_eq[i]);
 			kfree(phba->msi_name[i]);
 		}
 	} else
-		if (phba->pcidev->irq)
+		if (phba->pcidev->irq) {
+			synchronize_irq(phba->pcidev->irq);
 			free_irq(phba->pcidev->irq, phba);
-	pci_disable_msix(phba->pcidev);
-	destroy_workqueue(phba->wq);
-	if (blk_iopoll_enabled)
-		for (i = 0; i < phba->num_cpus; i++) {
-			
pbe_eq = &phwi_context->be_eq[i]; -			blk_iopoll_disable(&pbe_eq->iopoll);  		} +	pci_disable_msix(phba->pcidev); -	beiscsi_clean_port(phba); -	beiscsi_free_mem(phba); +	for (i = 0; i < phba->num_cpus; i++) { +		pbe_eq = &phwi_context->be_eq[i]; +		blk_iopoll_disable(&pbe_eq->iopoll); +	} -	beiscsi_unmap_pci_function(phba); -	pci_free_consistent(phba->pcidev, -			    phba->ctrl.mbox_mem_alloced.size, -			    phba->ctrl.mbox_mem_alloced.va, -			    phba->ctrl.mbox_mem_alloced.dma); +	if (unload_state == BEISCSI_CLEAN_UNLOAD) { +		destroy_workqueue(phba->wq); +		beiscsi_clean_port(phba); +		beiscsi_free_mem(phba); + +		beiscsi_unmap_pci_function(phba); +		pci_free_consistent(phba->pcidev, +				    phba->ctrl.mbox_mem_alloced.size, +				    phba->ctrl.mbox_mem_alloced.va, +				    phba->ctrl.mbox_mem_alloced.dma); +	} else { +		hwi_purge_eq(phba); +		hwi_cleanup(phba); +	}  	cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);  } @@ -4823,11 +5244,13 @@ static void beiscsi_remove(struct pci_dev *pcidev)  	}  	beiscsi_destroy_def_ifaces(phba); -	beiscsi_quiesce(phba); +	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);  	iscsi_boot_destroy_kset(phba->boot_kset);  	iscsi_host_remove(phba->shost);  	pci_dev_put(phba->pcidev);  	iscsi_host_free(phba->shost); +	pci_disable_pcie_error_reporting(pcidev); +	pci_set_drvdata(pcidev, NULL);  	pci_disable_device(pcidev);  } @@ -4842,7 +5265,9 @@ static void beiscsi_shutdown(struct pci_dev *pcidev)  		return;  	} -	beiscsi_quiesce(phba); +	phba->state = BE_ADAPTER_STATE_SHUTDOWN; +	iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); +	beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD);  	pci_disable_device(pcidev);  } @@ -4861,6 +5286,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)  	return;  } +static void be_eqd_update(struct beiscsi_hba *phba) +{ +	struct be_set_eqd set_eqd[MAX_CPUS]; +	struct be_aic_obj *aic; +	struct be_eq_obj *pbe_eq; +	struct hwi_controller *phwi_ctrlr; +	struct hwi_context_memory *phwi_context; +	int eqd, i, num = 0; +	ulong now; +	u32 pps, delta; +	unsigned int tag; + +	phwi_ctrlr = phba->phwi_ctrlr; +	phwi_context = phwi_ctrlr->phwi_ctxt; + +	for (i = 0; i <= phba->num_cpus; i++) { +		aic = &phba->aic_obj[i]; +		pbe_eq = &phwi_context->be_eq[i]; +		now = jiffies; +		if (!aic->jiffs || time_before(now, aic->jiffs) || +		    pbe_eq->cq_count < aic->eq_prev) { +			aic->jiffs = now; +			aic->eq_prev = pbe_eq->cq_count; +			continue; +		} +		delta = jiffies_to_msecs(now - aic->jiffs); +		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); +		eqd = (pps / 1500) << 2; + +		if (eqd < 8) +			eqd = 0; +		eqd = min_t(u32, eqd, phwi_context->max_eqd); +		eqd = max_t(u32, eqd, phwi_context->min_eqd); + +		aic->jiffs = now; +		aic->eq_prev = pbe_eq->cq_count; + +		if (eqd != aic->prev_eqd) { +			set_eqd[num].delay_multiplier = (eqd * 65)/100; +			set_eqd[num].eq_id = pbe_eq->q.id; +			aic->prev_eqd = eqd; +			num++; +		} +	} +	if (num) { +		tag = be_cmd_modify_eq_delay(phba, set_eqd, num); +		if (tag) +			beiscsi_mccq_compl(phba, tag, NULL, NULL); +	} +} +  /*   * beiscsi_hw_health_check()- Check adapter health   * @work: work item to check HW health @@ -4874,12 +5350,161 @@ beiscsi_hw_health_check(struct work_struct *work)  		container_of(work, struct beiscsi_hba,  			     beiscsi_hw_check_task.work); +	be_eqd_update(phba); +  	beiscsi_ue_detect(phba);  	schedule_delayed_work(&phba->beiscsi_hw_check_task,  			      msecs_to_jiffies(1000));  } + +static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev 
*pdev, +		pci_channel_state_t state) +{ +	struct beiscsi_hba *phba = NULL; + +	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); +	phba->state |= BE_ADAPTER_PCI_ERR; + +	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +		    "BM_%d : EEH error detected\n"); + +	beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD); + +	if (state == pci_channel_io_perm_failure) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +			    "BM_%d : EEH : State PERM Failure"); +		return PCI_ERS_RESULT_DISCONNECT; +	} + +	pci_disable_device(pdev); + +	/* The error could cause the FW to trigger a flash debug dump. +	 * Resetting the card while flash dump is in progress +	 * can cause it not to recover; wait for it to finish. +	 * Wait only for first function as it is needed only once per +	 * adapter. +	 **/ +	if (pdev->devfn == 0) +		ssleep(30); + +	return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) +{ +	struct beiscsi_hba *phba = NULL; +	int status = 0; + +	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); + +	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +		    "BM_%d : EEH Reset\n"); + +	status = pci_enable_device(pdev); +	if (status) +		return PCI_ERS_RESULT_DISCONNECT; + +	pci_set_master(pdev); +	pci_set_power_state(pdev, PCI_D0); +	pci_restore_state(pdev); + +	/* Wait for the CHIP Reset to complete */ +	status = be_chk_reset_complete(phba); +	if (!status) { +		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, +			    "BM_%d : EEH Reset Completed\n"); +	} else { +		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, +			    "BM_%d : EEH Reset Completion Failure\n"); +		return PCI_ERS_RESULT_DISCONNECT; +	} + +	pci_cleanup_aer_uncorrect_error_status(pdev); +	return PCI_ERS_RESULT_RECOVERED; +} + +static void beiscsi_eeh_resume(struct pci_dev *pdev) +{ +	int ret = 0, i; +	struct be_eq_obj *pbe_eq; +	struct beiscsi_hba *phba = NULL; +	struct hwi_controller *phwi_ctrlr; +	struct hwi_context_memory *phwi_context; + +	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); +	pci_save_state(pdev); + +	if (enable_msix) +		find_num_cpus(phba); +	else +		phba->num_cpus = 1; + +	if (enable_msix) { +		beiscsi_msix_enable(phba); +		if (!phba->msix_enabled) +			phba->num_cpus = 1; +	} + +	ret = beiscsi_cmd_reset_function(phba); +	if (ret) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +			    "BM_%d : Reset Failed\n"); +		goto ret_err; +	} + +	ret = be_chk_reset_complete(phba); +	if (ret) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +			    "BM_%d : Failed to get out of reset.\n"); +		goto ret_err; +	} + +	beiscsi_get_params(phba); +	phba->shost->max_id = phba->params.cxns_per_ctrl; +	phba->shost->can_queue = phba->params.ios_per_ctrl; +	ret = hwi_init_controller(phba); + +	for (i = 0; i < MAX_MCC_CMD; i++) { +		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); +		phba->ctrl.mcc_tag[i] = i + 1; +		phba->ctrl.mcc_numtag[i + 1] = 0; +		phba->ctrl.mcc_tag_available++; +	} + +	phwi_ctrlr = phba->phwi_ctrlr; +	phwi_context = phwi_ctrlr->phwi_ctxt; + +	for (i = 0; i < phba->num_cpus; i++) { +		pbe_eq = &phwi_context->be_eq[i]; +		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, +				be_iopoll); +		blk_iopoll_enable(&pbe_eq->iopoll); +	} + +	i = (phba->msix_enabled) ? 
i : 0; +	/* Work item for MCC handling */ +	pbe_eq = &phwi_context->be_eq[i]; +	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); + +	ret = beiscsi_init_irqs(phba); +	if (ret < 0) { +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +			    "BM_%d : beiscsi_eeh_resume - " +			    "Failed to beiscsi_init_irqs\n"); +		goto ret_err; +	} + +	hwi_enable_intr(phba); +	phba->state &= ~BE_ADAPTER_PCI_ERR; + +	return; +ret_err: +	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, +		    "BM_%d : AER EEH Resume Failed\n"); +} +  static int beiscsi_dev_probe(struct pci_dev *pcidev,  			     const struct pci_device_id *id)  { @@ -4887,7 +5512,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  	struct hwi_controller *phwi_ctrlr;  	struct hwi_context_memory *phwi_context;  	struct be_eq_obj *pbe_eq; -	int ret, i; +	int ret = 0, i;  	ret = beiscsi_enable_pci(pcidev);  	if (ret < 0) { @@ -4903,10 +5528,20 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  		goto disable_pci;  	} +	/* Enable EEH reporting */ +	ret = pci_enable_pcie_error_reporting(pcidev); +	if (ret) +		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, +			    "BM_%d : PCIe Error Reporting " +			    "Enabling Failed\n"); + +	pci_save_state(pcidev); +  	/* Initialize Driver configuration Paramters */  	beiscsi_hba_attrs_init(phba);  	phba->fw_timeout = false; +	phba->mac_addr_set = false;  	switch (pcidev->device) { @@ -4929,20 +5564,6 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  		phba->generation = 0;  	} -	if (enable_msix) -		find_num_cpus(phba); -	else -		phba->num_cpus = 1; - -	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -		    "BM_%d : num_cpus = %d\n", -		    phba->num_cpus); - -	if (enable_msix) { -		beiscsi_msix_enable(phba); -		if (!phba->msix_enabled) -			phba->num_cpus = 1; -	}  	ret = be_ctrl_init(phba, pcidev);  	if (ret) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -4954,27 +5575,43 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  	ret = beiscsi_cmd_reset_function(phba);  	if (ret) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : Reset Failed. Aborting Crashdump\n"); +			    "BM_%d : Reset Failed\n");  		goto hba_free;  	}  	ret = be_chk_reset_complete(phba);  	if (ret) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, -			    "BM_%d : Failed to get out of reset." 
-			    "Aborting Crashdump\n"); +			    "BM_%d : Failed to get out of reset.\n");  		goto hba_free;  	}  	spin_lock_init(&phba->io_sgl_lock);  	spin_lock_init(&phba->mgmt_sgl_lock);  	spin_lock_init(&phba->isr_lock); +	spin_lock_init(&phba->async_pdu_lock);  	ret = mgmt_get_fw_config(&phba->ctrl, phba);  	if (ret != 0) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,  			    "BM_%d : Error getting fw config\n");  		goto free_port;  	} -	phba->shost->max_id = phba->fw_config.iscsi_cid_count; + +	if (enable_msix) +		find_num_cpus(phba); +	else +		phba->num_cpus = 1; + +	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +		    "BM_%d : num_cpus = %d\n", +		    phba->num_cpus); + +	if (enable_msix) { +		beiscsi_msix_enable(phba); +		if (!phba->msix_enabled) +			phba->num_cpus = 1; +	} + +	phba->shost->max_id = phba->params.cxns_per_ctrl;  	beiscsi_get_params(phba);  	phba->shost->can_queue = phba->params.ios_per_ctrl;  	ret = beiscsi_init_port(phba); @@ -4985,11 +5622,13 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  		goto free_port;  	} -	for (i = 0; i < MAX_MCC_CMD ; i++) { +	for (i = 0; i < MAX_MCC_CMD; i++) {  		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);  		phba->ctrl.mcc_tag[i] = i + 1;  		phba->ctrl.mcc_numtag[i + 1] = 0;  		phba->ctrl.mcc_tag_available++; +		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, +		       sizeof(struct be_dma_mem));  	}  	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; @@ -5010,32 +5649,18 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  	phwi_ctrlr = phba->phwi_ctrlr;  	phwi_context = phwi_ctrlr->phwi_ctxt; -	if (blk_iopoll_enabled) { -		for (i = 0; i < phba->num_cpus; i++) { -			pbe_eq = &phwi_context->be_eq[i]; -			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, -					be_iopoll); -			blk_iopoll_enable(&pbe_eq->iopoll); -		} - -		i = (phba->msix_enabled) ? i : 0; -		/* Work item for MCC handling */ +	for (i = 0; i < phba->num_cpus; i++) {  		pbe_eq = &phwi_context->be_eq[i]; -		INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); -	} else { -		if (phba->msix_enabled) { -			for (i = 0; i <= phba->num_cpus; i++) { -				pbe_eq = &phwi_context->be_eq[i]; -				INIT_WORK(&pbe_eq->work_cqs, -					  beiscsi_process_all_cqs); -			} -		} else { -				pbe_eq = &phwi_context->be_eq[0]; -				INIT_WORK(&pbe_eq->work_cqs, -					  beiscsi_process_all_cqs); -			} +		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, +				be_iopoll); +		blk_iopoll_enable(&pbe_eq->iopoll);  	} +	i = (phba->msix_enabled) ? 
i : 0; +	/* Work item for MCC handling */ +	pbe_eq = &phwi_context->be_eq[i]; +	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); +  	ret = beiscsi_init_irqs(phba);  	if (ret < 0) {  		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, @@ -5045,6 +5670,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  	}  	hwi_enable_intr(phba); +	if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) +		goto free_blkenbld; +  	if (beiscsi_setup_boot_info(phba))  		/*  		 * log error but continue, because we may not be using @@ -5064,11 +5692,10 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,  free_blkenbld:  	destroy_workqueue(phba->wq); -	if (blk_iopoll_enabled) -		for (i = 0; i < phba->num_cpus; i++) { -			pbe_eq = &phwi_context->be_eq[i]; -			blk_iopoll_disable(&pbe_eq->iopoll); -		} +	for (i = 0; i < phba->num_cpus; i++) { +		pbe_eq = &phwi_context->be_eq[i]; +		blk_iopoll_disable(&pbe_eq->iopoll); +	}  free_twq:  	beiscsi_clean_port(phba);  	beiscsi_free_mem(phba); @@ -5089,6 +5716,12 @@ disable_pci:  	return ret;  } +static struct pci_error_handlers beiscsi_eeh_handlers = { +	.error_detected = beiscsi_eeh_err_detected, +	.slot_reset = beiscsi_eeh_reset, +	.resume = beiscsi_eeh_resume, +}; +  struct iscsi_transport beiscsi_iscsi_transport = {  	.owner = THIS_MODULE,  	.name = DRV_NAME, @@ -5127,7 +5760,8 @@ static struct pci_driver beiscsi_pci_driver = {  	.probe = beiscsi_dev_probe,  	.remove = beiscsi_remove,  	.shutdown = beiscsi_shutdown, -	.id_table = beiscsi_pci_id_table +	.id_table = beiscsi_pci_id_table, +	.err_handler = &beiscsi_eeh_handlers  }; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 2c06ef3c02a..9ceab426eec 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -26,6 +26,7 @@  #include <linux/in.h>  #include <linux/ctype.h>  #include <linux/module.h> +#include <linux/aer.h>  #include <scsi/scsi.h>  #include <scsi/scsi_cmnd.h>  #include <scsi/scsi_device.h> @@ -34,9 +35,8 @@  #include <scsi/libiscsi.h>  #include <scsi/scsi_transport_iscsi.h> -#include "be.h"  #define DRV_NAME		"be2iscsi" -#define BUILD_STR		"10.0.467.0" +#define BUILD_STR		"10.2.273.0"  #define BE_NAME			"Emulex OneConnect" \  				"Open-iSCSI Driver version" BUILD_STR  #define DRV_DESC		BE_NAME " " "Driver" @@ -66,14 +66,14 @@  #define MAX_CPUS		64  #define BEISCSI_MAX_NUM_CPUS	7 -#define OC_SKH_MAX_NUM_CPUS	31  #define BEISCSI_VER_STRLEN 32  #define BEISCSI_SGLIST_ELEMENTS	30 -#define BEISCSI_CMD_PER_LUN	128	/* scsi_host->cmd_per_lun */ -#define BEISCSI_MAX_SECTORS	2048	/* scsi_host->max_sectors */ +#define BEISCSI_CMD_PER_LUN	128 /* scsi_host->cmd_per_lun */ +#define BEISCSI_MAX_SECTORS	1024 /* scsi_host->max_sectors */ +#define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */  #define BEISCSI_MAX_CMD_LEN	16	/* scsi_host->max_cmd_len */  #define BEISCSI_NUM_MAX_LUN	256	/* scsi_host->max_lun */ @@ -97,14 +97,24 @@  #define INVALID_SESS_HANDLE	0xFFFFFFFF -#define BE_ADAPTER_UP		0x00000000 -#define BE_ADAPTER_LINK_DOWN	0x00000001 +/** + * Adapter States + **/ +#define BE_ADAPTER_LINK_UP	0x001 +#define BE_ADAPTER_LINK_DOWN	0x002 +#define BE_ADAPTER_PCI_ERR	0x004 +#define BE_ADAPTER_STATE_SHUTDOWN	0x008 + + +#define BEISCSI_CLEAN_UNLOAD	0x01 +#define BEISCSI_EEH_UNLOAD	0x02  /**   * hardware needs the async PDU buffers to be posted in multiples of 8   * So have atleast 8 of them by default   */ -#define HWI_GET_ASYNC_PDU_CTX(phwi)	(phwi->phwi_ctxt->pasync_ctx) +#define HWI_GET_ASYNC_PDU_CTX(phwi, ulp_num)	\ +	
(phwi->phwi_ctxt->pasync_ctx[ulp_num])  /********* Memory BAR register ************/  #define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET	0xfc @@ -130,11 +140,15 @@  #define DB_RXULP0_OFFSET 0xA0  /********* Event Q door bell *************/  #define DB_EQ_OFFSET			DB_CQ_OFFSET -#define DB_EQ_RING_ID_MASK		0x1FF	/* bits 0 - 8 */ +#define DB_EQ_RING_ID_LOW_MASK		0x1FF	/* bits 0 - 8 */  /* Clear the interrupt for this eq */  #define DB_EQ_CLR_SHIFT			(9)	/* bit 9 */  /* Must be 1 */  #define DB_EQ_EVNT_SHIFT		(10)	/* bit 10 */ +/* Higher Order EQ_ID bit */ +#define DB_EQ_RING_ID_HIGH_MASK	0x1F /* bits 11 - 15 */ +#define DB_EQ_HIGH_SET_SHIFT	11 +#define DB_EQ_HIGH_FEILD_SHIFT	9  /* Number of event entries processed */  #define DB_EQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */  /* Rearm bit */ @@ -142,36 +156,53 @@  /********* Compl Q door bell *************/  #define DB_CQ_OFFSET			0x120 -#define DB_CQ_RING_ID_MASK		0x3FF	/* bits 0 - 9 */ +#define DB_CQ_RING_ID_LOW_MASK		0x3FF	/* bits 0 - 9 */ +/* Higher Order CQ_ID bit */ +#define DB_CQ_RING_ID_HIGH_MASK	0x1F /* bits 11 - 15 */ +#define DB_CQ_HIGH_SET_SHIFT	11 +#define DB_CQ_HIGH_FEILD_SHIFT	10 +  /* Number of event entries processed */  #define DB_CQ_NUM_POPPED_SHIFT		(16)	/* bits 16 - 28 */  /* Rearm bit */  #define DB_CQ_REARM_SHIFT		(29)	/* bit 29 */  #define GET_HWI_CONTROLLER_WS(pc)	(pc->phwi_ctrlr) -#define HWI_GET_DEF_BUFQ_ID(pc) (((struct hwi_controller *)\ -		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data.id) -#define HWI_GET_DEF_HDRQ_ID(pc) (((struct hwi_controller *)\ -		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr.id) +#define HWI_GET_DEF_BUFQ_ID(pc, ulp_num) (((struct hwi_controller *)\ +		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_data[ulp_num].id) +#define HWI_GET_DEF_HDRQ_ID(pc, ulp_num) (((struct hwi_controller *)\ +		(GET_HWI_CONTROLLER_WS(pc)))->default_pdu_hdr[ulp_num].id)  #define PAGES_REQUIRED(x) \  	((x < PAGE_SIZE) ? 
1 :  ((x + PAGE_SIZE - 1) / PAGE_SIZE))  #define BEISCSI_MSI_NAME 20 /* size of msi_name string */ +#define MEM_DESCR_OFFSET 8 +#define BEISCSI_DEFQ_HDR 1 +#define BEISCSI_DEFQ_DATA 0  enum be_mem_enum {  	HWI_MEM_ADDN_CONTEXT,  	HWI_MEM_WRB,  	HWI_MEM_WRBH,  	HWI_MEM_SGLH,  	HWI_MEM_SGE, -	HWI_MEM_ASYNC_HEADER_BUF,	/* 5 */ -	HWI_MEM_ASYNC_DATA_BUF, -	HWI_MEM_ASYNC_HEADER_RING, -	HWI_MEM_ASYNC_DATA_RING, -	HWI_MEM_ASYNC_HEADER_HANDLE, -	HWI_MEM_ASYNC_DATA_HANDLE,	/* 10 */ -	HWI_MEM_ASYNC_PDU_CONTEXT, +	HWI_MEM_TEMPLATE_HDR_ULP0, +	HWI_MEM_ASYNC_HEADER_BUF_ULP0,	/* 6 */ +	HWI_MEM_ASYNC_DATA_BUF_ULP0, +	HWI_MEM_ASYNC_HEADER_RING_ULP0, +	HWI_MEM_ASYNC_DATA_RING_ULP0, +	HWI_MEM_ASYNC_HEADER_HANDLE_ULP0, +	HWI_MEM_ASYNC_DATA_HANDLE_ULP0,	/* 11 */ +	HWI_MEM_ASYNC_PDU_CONTEXT_ULP0, +	HWI_MEM_TEMPLATE_HDR_ULP1, +	HWI_MEM_ASYNC_HEADER_BUF_ULP1,	/* 14 */ +	HWI_MEM_ASYNC_DATA_BUF_ULP1, +	HWI_MEM_ASYNC_HEADER_RING_ULP1, +	HWI_MEM_ASYNC_DATA_RING_ULP1, +	HWI_MEM_ASYNC_HEADER_HANDLE_ULP1, +	HWI_MEM_ASYNC_DATA_HANDLE_ULP1,	/* 19 */ +	HWI_MEM_ASYNC_PDU_CONTEXT_ULP1,  	ISCSI_MEM_GLOBAL_HEADER,  	SE_MEM_MAX  }; @@ -266,9 +297,49 @@ struct invalidate_command_table {  	unsigned short cid;  } __packed; +#define BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cri) \ +	(phwi_ctrlr->wrb_context[cri].ulp_num) +struct hwi_wrb_context { +	struct list_head wrb_handle_list; +	struct list_head wrb_handle_drvr_list; +	struct wrb_handle **pwrb_handle_base; +	struct wrb_handle **pwrb_handle_basestd; +	struct iscsi_wrb *plast_wrb; +	unsigned short alloc_index; +	unsigned short free_index; +	unsigned short wrb_handles_available; +	unsigned short cid; +	uint8_t ulp_num;	/* ULP to which CID binded */ +	uint16_t register_set; +	uint16_t doorbell_format; +	uint32_t doorbell_offset; +}; + +struct ulp_cid_info { +	unsigned short *cid_array; +	unsigned short avlbl_cids; +	unsigned short cid_alloc; +	unsigned short cid_free; +}; + +#include "be.h"  #define chip_be2(phba)      (phba->generation == BE_GEN2)  #define chip_be3_r(phba)    (phba->generation == BE_GEN3)  #define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) + +#define BEISCSI_ULP0    0 +#define BEISCSI_ULP1    1 +#define BEISCSI_ULP_COUNT   2 +#define BEISCSI_ULP0_LOADED 0x01 +#define BEISCSI_ULP1_LOADED 0x02 + +#define BEISCSI_ULP_AVLBL_CID(phba, ulp_num) \ +	(((struct ulp_cid_info *)phba->cid_array_info[ulp_num])->avlbl_cids) +#define BEISCSI_ULP0_AVLBL_CID(phba) \ +	BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP0) +#define BEISCSI_ULP1_AVLBL_CID(phba) \ +	BEISCSI_ULP_AVLBL_CID(phba, BEISCSI_ULP1) +  struct beiscsi_hba {  	struct hba_parameters params;  	struct hwi_controller *phwi_ctrlr; @@ -303,17 +374,15 @@ struct beiscsi_hba {  	spinlock_t io_sgl_lock;  	spinlock_t mgmt_sgl_lock;  	spinlock_t isr_lock; +	spinlock_t async_pdu_lock;  	unsigned int age; -	unsigned short avlbl_cids; -	unsigned short cid_alloc; -	unsigned short cid_free;  	struct list_head hba_queue;  #define BE_MAX_SESSION 2048  #define BE_SET_CID_TO_CRI(cri_index, cid) \  			  (phba->cid_to_cri_map[cid] = cri_index)  #define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid])  	unsigned short cid_to_cri_map[BE_MAX_SESSION]; -	unsigned short *cid_array; +	struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];  	struct iscsi_endpoint **ep_array;  	struct beiscsi_conn **conn_table;  	struct iscsi_boot_kset *boot_kset; @@ -325,20 +394,21 @@ struct beiscsi_hba {  		 * group together since they are used most frequently  		 * for cid to cri conversion  		 */ -		unsigned int iscsi_cid_start;  		unsigned int 
phys_port; +		unsigned int eqid_count; +		unsigned int cqid_count; +		unsigned int iscsi_cid_start[BEISCSI_ULP_COUNT]; +#define BEISCSI_GET_CID_COUNT(phba, ulp_num) \ +		(phba->fw_config.iscsi_cid_count[ulp_num]) +		unsigned int iscsi_cid_count[BEISCSI_ULP_COUNT]; +		unsigned int iscsi_icd_count[BEISCSI_ULP_COUNT]; +		unsigned int iscsi_icd_start[BEISCSI_ULP_COUNT]; +		unsigned int iscsi_chain_start[BEISCSI_ULP_COUNT]; +		unsigned int iscsi_chain_count[BEISCSI_ULP_COUNT]; -		unsigned int isr_offset; -		unsigned int iscsi_icd_start; -		unsigned int iscsi_cid_count; -		unsigned int iscsi_icd_count; -		unsigned int pci_function; - -		unsigned short cid_alloc; -		unsigned short cid_free; -		unsigned short avlbl_cids;  		unsigned short iscsi_features; -		spinlock_t cid_lock; +		uint16_t dual_ulp_aware; +		unsigned long ulp_supported;  	} fw_config;  	unsigned int state; @@ -346,6 +416,7 @@ struct beiscsi_hba {  	bool ue_detected;  	struct delayed_work beiscsi_hw_check_task; +	bool mac_addr_set;  	u8 mac_address[ETH_ALEN];  	char fw_ver_str[BEISCSI_VER_STRLEN];  	char wq_name[20]; @@ -356,6 +427,7 @@ struct beiscsi_hba {  	struct mgmt_session_info boot_sess;  	struct invalidate_command_table inv_tbl[128]; +	struct be_aic_obj aic_obj[MAX_CPUS];  	unsigned int attr_log_enable;  	int (*iotask_fn)(struct iscsi_task *,  			struct scatterlist *sg, @@ -374,6 +446,7 @@ struct beiscsi_conn {  	struct iscsi_conn *conn;  	struct beiscsi_hba *phba;  	u32 exp_statsn; +	u32 doorbell_offset;  	u32 beiscsi_conn_cid;  	struct beiscsi_endpoint *ep;  	unsigned short login_in_progress; @@ -474,7 +547,7 @@ struct amap_iscsi_sge {  };  struct beiscsi_offload_params { -	u32 dw[5]; +	u32 dw[6];  };  #define OFFLD_PARAMS_ERL	0x00000003 @@ -504,6 +577,7 @@ struct amap_beiscsi_offload_params {  	u8 max_r2t[16];  	u8 pad[8];  	u8 exp_statsn[32]; +	u8 max_recv_data_segment_length[32];  };  /* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, @@ -567,7 +641,8 @@ struct hwi_async_pdu_context {  	unsigned int buffer_size;  	unsigned int num_entries; - +#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid]) +	unsigned short cid_to_async_cri_map[BE_MAX_SESSION];  	/**  	 * This is a varying size list! Do not add anything  	 * after this entry!! 
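The per-ULP tables above replace the old single flat CID range: each supported ULP now carries its own CID/ICD base and count, discovered from the firmware config and gated by the fw_config.ulp_supported bitmap. A minimal sketch of how a consumer walks these tables (the helper below is illustrative only, not part of the patch; it uses just the macros and fields introduced above):

/* Illustrative only: count the CIDs this function owns across all ULPs,
 * using the per-ULP fw_config tables introduced by this patch.
 */
static unsigned int beiscsi_total_cid_count(struct beiscsi_hba *phba)
{
	uint16_t ulp_num;
	unsigned int total_cids = 0;

	/* Walk only the ULPs the firmware reports as loaded */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			total_cids += BEISCSI_GET_CID_COUNT(phba, ulp_num);

	return total_cids;
}

This mirrors the pattern hba_setup_cid_tbls() and hwi_init_port() follow: every per-ULP resource (CID arrays, default PDU rings, async contexts) is sized and indexed through the same ulp_supported test.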
@@ -761,6 +836,9 @@ void beiscsi_process_all_cqs(struct work_struct *work);  void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,  				     struct iscsi_task *task); +void hwi_ring_cq_db(struct beiscsi_hba *phba, +		     unsigned int id, unsigned int num_processed, +		     unsigned char rearm, unsigned char event);  static inline bool beiscsi_error(struct beiscsi_hba *phba)  {  	return phba->ue_detected || phba->fw_timeout; @@ -885,30 +963,32 @@ struct amap_iscsi_target_context_update_wrb_v2 {  	u8 first_burst_length[24]; /* DWORD 3 */  	u8 rsvd3[8]; /* DOWRD 3 */  	u8 max_r2t[16]; /* DWORD 4 */ -	u8 rsvd4[10];   /* DWORD 4 */ +	u8 rsvd4;       /* DWORD 4 */  	u8 hde;         /* DWORD 4 */  	u8 dde;         /* DWORD 4 */  	u8 erl[2];      /* DWORD 4 */ +	u8 rsvd5[6];    /* DWORD 4 */  	u8 imd;         /* DWORD 4 */  	u8 ir2t;        /* DWORD 4 */ +	u8 rsvd6[3];    /* DWORD 4 */  	u8 stat_sn[32];     /* DWORD 5 */ -	u8 rsvd5[32];   /* DWORD 6 */ -	u8 rsvd6[32];   /* DWORD 7 */ +	u8 rsvd7[32];   /* DWORD 6 */ +	u8 rsvd8[32];   /* DWORD 7 */  	u8 max_recv_dataseg_len[24];    /* DWORD 8 */ -	u8 rsvd7[8]; /* DWORD 8 */ -	u8 rsvd8[32];   /* DWORD 9 */ -	u8 rsvd9[32];   /* DWORD 10 */ +	u8 rsvd9[8]; /* DWORD 8 */ +	u8 rsvd10[32];   /* DWORD 9 */ +	u8 rsvd11[32];   /* DWORD 10 */  	u8 max_cxns[16]; /* DWORD 11 */ -	u8 rsvd10[11]; /* DWORD  11*/ +	u8 rsvd12[11]; /* DWORD  11*/  	u8 invld; /* DWORD 11 */ -	u8 rsvd11;/* DWORD 11*/ +	u8 rsvd13;/* DWORD 11*/  	u8 dmsg; /* DWORD 11 */  	u8 data_seq_inorder; /* DWORD 11 */  	u8 pdu_seq_inorder; /* DWORD 11 */ -	u8 rsvd12[32]; /*DWORD 12 */ -	u8 rsvd13[32]; /* DWORD 13 */ -	u8 rsvd14[32]; /* DWORD 14 */ -	u8 rsvd15[32]; /* DWORD 15 */ +	u8 rsvd14[32]; /*DWORD 12 */ +	u8 rsvd15[32]; /* DWORD 13 */ +	u8 rsvd16[32]; /* DWORD 14 */ +	u8 rsvd17[32]; /* DWORD 15 */  } __packed; @@ -919,6 +999,10 @@ struct be_ring {  	u32 cidx;		/* consumer index */  	u32 pidx;		/* producer index -- not used by most rings */  	u32 item_size;		/* size in bytes of one object */ +	u8 ulp_num;	/* ULP to which CID binded */ +	u16 register_set; +	u16 doorbell_format; +	u32 doorbell_offset;  	void *va;		/* The virtual address of the ring.  
This  				 * should be last to allow 32 & 64 bit debugger @@ -926,18 +1010,6 @@ struct be_ring {  				 */  }; -struct hwi_wrb_context { -	struct list_head wrb_handle_list; -	struct list_head wrb_handle_drvr_list; -	struct wrb_handle **pwrb_handle_base; -	struct wrb_handle **pwrb_handle_basestd; -	struct iscsi_wrb *plast_wrb; -	unsigned short alloc_index; -	unsigned short free_index; -	unsigned short wrb_handles_available; -	unsigned short cid; -}; -  struct hwi_controller {  	struct list_head io_sgl_list;  	struct list_head eh_sgl_list; @@ -946,8 +1018,8 @@ struct hwi_controller {  	struct hwi_wrb_context *wrb_context;  	struct mcc_wrb *pmcc_wrb_base; -	struct be_ring default_pdu_hdr; -	struct be_ring default_pdu_data; +	struct be_ring default_pdu_hdr[BEISCSI_ULP_COUNT]; +	struct be_ring default_pdu_data[BEISCSI_ULP_COUNT];  	struct hwi_context_memory *phwi_ctxt;  }; @@ -978,11 +1050,10 @@ struct hwi_context_memory {  	struct be_eq_obj be_eq[MAX_CPUS];  	struct be_queue_info be_cq[MAX_CPUS - 1]; -	struct be_queue_info be_def_hdrq; -	struct be_queue_info be_def_dataq; -  	struct be_queue_info *be_wrbq; -	struct hwi_async_pdu_context *pasync_ctx; +	struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT]; +	struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT]; +	struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT];  };  /* Logging related definitions */ @@ -992,6 +1063,7 @@ struct hwi_context_memory {  #define BEISCSI_LOG_EH		0x0008	/* Error Handler */  #define BEISCSI_LOG_IO		0x0010	/* IO Code Path */  #define BEISCSI_LOG_CONFIG	0x0020	/* CONFIG Code Path */ +#define BEISCSI_LOG_ISCSI	0x0040	/* SCSI/iSCSI Protocol related Logs */  #define beiscsi_log(phba, level, mask, fmt, arg...) \  do { \ diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 245a9595a93..07934b0b9ee 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba)  	}  } +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, +		 struct be_set_eqd *set_eqd, int num) +{ +	struct be_ctrl_info *ctrl = &phba->ctrl; +	struct be_mcc_wrb *wrb; +	struct be_cmd_req_modify_eq_delay *req; +	unsigned int tag = 0; +	int i; + +	spin_lock(&ctrl->mbox_lock); +	tag = alloc_mcc_tag(phba); +	if (!tag) { +		spin_unlock(&ctrl->mbox_lock); +		return tag; +	} + +	wrb = wrb_from_mccq(phba); +	req = embedded_payload(wrb); + +	wrb->tag0 |= tag; +	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); +	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, +		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); + +	req->num_eq = cpu_to_le32(num); +	for (i = 0; i < num; i++) { +		req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); +		req->delay[i].phase = 0; +		req->delay[i].delay_multiplier = +				cpu_to_le32(set_eqd[i].delay_multiplier); +	} + +	be_mcc_notify(phba); +	spin_unlock(&ctrl->mbox_lock); +	return tag; +} +  /**   * mgmt_reopen_session()- Reopen a session based on reopen_type   * @phba: Device priv structure instance @@ -278,6 +315,18 @@ unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,  	return tag;  } +/** + * mgmt_get_fw_config()- Get the FW config for the function + * @ctrl: ptr to Ctrl Info + * @phba: ptr to the dev priv structure + * + * Get the FW config and resources available for the function. + * The resources are created based on the count received here. 
+ * + * return + *	Success: 0 + *	Failure: Non-Zero Value + **/  int mgmt_get_fw_config(struct be_ctrl_info *ctrl,  				struct beiscsi_hba *phba)  { @@ -291,31 +340,79 @@ int mgmt_get_fw_config(struct be_ctrl_info *ctrl,  	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);  	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, -			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req)); +			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, +			   EMBED_MBX_MAX_PAYLOAD_SIZE);  	status = be_mbox_notify(ctrl);  	if (!status) { +		uint8_t ulp_num = 0;  		struct be_fw_cfg *pfw_cfg;  		pfw_cfg = req; + +		if (!is_chip_be2_be3r(phba)) { +			phba->fw_config.eqid_count = pfw_cfg->eqid_count; +			phba->fw_config.cqid_count = pfw_cfg->cqid_count; + +			beiscsi_log(phba, KERN_INFO, +				    BEISCSI_LOG_INIT, +				    "BG_%d : EQ_Count : %d CQ_Count : %d\n", +				    phba->fw_config.eqid_count, +				    phba->fw_config.cqid_count); +		} + +		for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) +			if (pfw_cfg->ulp[ulp_num].ulp_mode & +			    BEISCSI_ULP_ISCSI_INI_MODE) +				set_bit(ulp_num, +				&phba->fw_config.ulp_supported); +  		phba->fw_config.phys_port = pfw_cfg->phys_port; -		phba->fw_config.iscsi_icd_start = -					pfw_cfg->ulp[0].icd_base; -		phba->fw_config.iscsi_icd_count = -					pfw_cfg->ulp[0].icd_count; -		phba->fw_config.iscsi_cid_start = -					pfw_cfg->ulp[0].sq_base; -		phba->fw_config.iscsi_cid_count = -					pfw_cfg->ulp[0].sq_count; -		if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) { -			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, -				    "BG_%d : FW reported MAX CXNS as %d\t" -				    "Max Supported = %d.\n", -				    phba->fw_config.iscsi_cid_count, -				    BE2_MAX_SESSIONS); -			phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2; +		for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { +			if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { + +				phba->fw_config.iscsi_cid_start[ulp_num] = +					pfw_cfg->ulp[ulp_num].sq_base; +				phba->fw_config.iscsi_cid_count[ulp_num] = +					pfw_cfg->ulp[ulp_num].sq_count; + +				phba->fw_config.iscsi_icd_start[ulp_num] = +					pfw_cfg->ulp[ulp_num].icd_base; +				phba->fw_config.iscsi_icd_count[ulp_num] = +					pfw_cfg->ulp[ulp_num].icd_count; + +				phba->fw_config.iscsi_chain_start[ulp_num] = +					pfw_cfg->chain_icd[ulp_num].chain_base; +				phba->fw_config.iscsi_chain_count[ulp_num] = +					pfw_cfg->chain_icd[ulp_num].chain_count; + +				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +					    "BG_%d : Function loaded on ULP : %d\n" +					    "\tiscsi_cid_count : %d\n" +					    "\tiscsi_cid_start : %d\n" +					    "\t iscsi_icd_count : %d\n" +					    "\t iscsi_icd_start : %d\n", +					    ulp_num, +					    phba->fw_config. +					    iscsi_cid_count[ulp_num], +					    phba->fw_config. +					    iscsi_cid_start[ulp_num], +					    phba->fw_config. +					    iscsi_icd_count[ulp_num], +					    phba->fw_config. 
+					    iscsi_icd_start[ulp_num]); +			}  		} + +		phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode & +						  BEISCSI_FUNC_DUA_MODE); + +		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, +			    "BG_%d : DUA Mode : 0x%x\n", +			    phba->fw_config.dual_ulp_aware); +  	} else { -		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, +		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,  			    "BG_%d : Failed in mgmt_get_fw_config\n"); +		status = -EINVAL;  	}  	spin_unlock(&ctrl->mbox_lock); @@ -387,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,  					 struct be_dma_mem *nonemb_cmd)  {  	struct be_cmd_resp_hdr *resp; -	struct be_mcc_wrb *wrb = wrb_from_mccq(phba); -	struct be_sge *mcc_sge = nonembedded_sgl(wrb); +	struct be_mcc_wrb *wrb; +	struct be_sge *mcc_sge;  	unsigned int tag = 0;  	struct iscsi_bsg_request *bsg_req = job->request;  	struct be_bsg_vendor_cmd *req = nonemb_cmd->va; @@ -405,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,  	req->sector = sector;  	req->offset = offset;  	spin_lock(&ctrl->mbox_lock); -	memset(wrb, 0, sizeof(*wrb));  	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {  	case BEISCSI_WRITE_FLASH: @@ -435,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,  		return tag;  	} +	wrb = wrb_from_mccq(phba); +	mcc_sge = nonembedded_sgl(wrb);  	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,  			   job->request_payload.sg_cnt);  	mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma)); @@ -448,7 +546,16 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,  	return tag;  } -int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute) +/** + * mgmt_epfw_cleanup()- Inform FW to cleanup data structures. + * @phba: pointer to dev priv structure + * @ulp_num: ULP number. 
+ * + * return + *	Success: 0 + *	Failure: Non-Zero Value + **/ +int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)  {  	struct be_ctrl_info *ctrl = &phba->ctrl;  	struct be_mcc_wrb *wrb = wrb_from_mccq(phba); @@ -456,15 +563,14 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)  	int status = 0;  	spin_lock(&ctrl->mbox_lock); -	memset(wrb, 0, sizeof(*wrb));  	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);  	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,  			   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); -	req->chute = chute; -	req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba)); -	req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba)); +	req->chute = (1 << ulp_num); +	req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num)); +	req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));  	status =  be_mcc_notify_wait(phba);  	if (status) @@ -585,6 +691,16 @@ unsigned int mgmt_upload_connection(struct beiscsi_hba *phba,  	return tag;  } +/** + * mgmt_open_connection()- Establish a TCP CXN + * @dst_addr: Destination Address + * @beiscsi_ep: ptr to device endpoint struct + * @nonemb_cmd: ptr to memory allocated for command + * + * return + *	Success: Tag number of the MBX Command issued + *	Failure: Error code + **/  int mgmt_open_connection(struct beiscsi_hba *phba,  			 struct sockaddr *dst_addr,  			 struct beiscsi_endpoint *beiscsi_ep, @@ -596,20 +712,23 @@ int mgmt_open_connection(struct beiscsi_hba *phba,  	struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;  	struct be_ctrl_info *ctrl = &phba->ctrl;  	struct be_mcc_wrb *wrb; -	struct tcp_connect_and_offload_in *req; +	struct tcp_connect_and_offload_in_v1 *req;  	unsigned short def_hdr_id;  	unsigned short def_data_id;  	struct phys_addr template_address = { 0, 0 };  	struct phys_addr *ptemplate_address;  	unsigned int tag = 0; -	unsigned int i; +	unsigned int i, ulp_num;  	unsigned short cid = beiscsi_ep->ep_cid;  	struct be_sge *sge;  	phwi_ctrlr = phba->phwi_ctrlr;  	phwi_context = phwi_ctrlr->phwi_ctxt; -	def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba); -	def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba); + +	ulp_num = phwi_ctrlr->wrb_context[BE_GET_CRI_FROM_CID(cid)].ulp_num; + +	def_hdr_id = (unsigned short)HWI_GET_DEF_HDRQ_ID(phba, ulp_num); +	def_data_id = (unsigned short)HWI_GET_DEF_BUFQ_ID(phba, ulp_num);  	ptemplate_address = &template_address;  	ISCSI_GET_PDU_TEMPLATE_ADDRESS(phba, ptemplate_address); @@ -620,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba,  		return tag;  	}  	wrb = wrb_from_mccq(phba); -	memset(wrb, 0, sizeof(*wrb));  	sge = nonembedded_sgl(wrb);  	req = nonemb_cmd->va;  	memset(req, 0, sizeof(*req));  	wrb->tag0 |= tag; -	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); +	be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);  	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,  			   OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD, -			   sizeof(*req)); +			   nonemb_cmd->size);  	if (dst_addr->sa_family == PF_INET) {  		__be32 s_addr = daddr_in->sin_addr.s_addr;  		req->ip_address.ip_type = BE2_IPV4; @@ -676,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,  	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));  	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);  	sge->len = cpu_to_le32(nonemb_cmd->size); + +	if (!is_chip_be2_be3r(phba)) { +		req->hdr.version = MBX_CMD_VER1; +		req->tcp_window_size = 0; +		req->tcp_window_scale_count = 2; +	} +  	be_mcc_notify(phba); 
 	spin_unlock(&ctrl->mbox_lock);  	return tag; @@ -722,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,  				int resp_buf_len)  {  	struct be_ctrl_info *ctrl = &phba->ctrl; -	struct be_mcc_wrb *wrb = wrb_from_mccq(phba); +	struct be_mcc_wrb *wrb;  	struct be_sge *sge;  	unsigned int tag;  	int rc = 0; @@ -734,7 +859,8 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,  		rc = -ENOMEM;  		goto free_cmd;  	} -	memset(wrb, 0, sizeof(*wrb)); + +	wrb = wrb_from_mccq(phba);  	wrb->tag0 |= tag;  	sge = nonembedded_sgl(wrb); @@ -746,19 +872,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,  	be_mcc_notify(phba);  	spin_unlock(&ctrl->mbox_lock); -	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va); +	rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd); + +	if (resp_buf) +		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); +  	if (rc) { -		beiscsi_log(phba, KERN_ERR, +		/* Check if the MBX Cmd needs to be re-issued */ +		if (rc == -EAGAIN) +			return rc; + +		beiscsi_log(phba, KERN_WARNING,  			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,  			    "BG_%d : mgmt_exec_nonemb_cmd Failed status\n"); -		rc = -EIO; -		goto free_cmd; +		if (rc != -EBUSY) +			goto free_cmd; +		else +			return rc;  	} - -	if (resp_buf) -		memcpy(resp_buf, nonemb_cmd->va, resp_buf_len); -  free_cmd:  	pci_free_consistent(ctrl->pdev, nonemb_cmd->size,  			    nonemb_cmd->va, nonemb_cmd->dma); @@ -861,7 +993,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,  		uint32_t boot_proto)  {  	struct be_cmd_get_def_gateway_resp gtway_addr_set; -	struct be_cmd_get_if_info_resp if_info; +	struct be_cmd_get_if_info_resp *if_info;  	struct be_cmd_set_dhcp_req *dhcpreq;  	struct be_cmd_rel_dhcp_req *reldhcp;  	struct be_dma_mem nonemb_cmd; @@ -872,7 +1004,6 @@ int mgmt_set_ip(struct beiscsi_hba *phba,  	if (mgmt_get_all_if_id(phba))  		return -EIO; -	memset(&if_info, 0, sizeof(if_info));  	ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ?  		
BE2_IPV6 : BE2_IPV4 ; @@ -881,7 +1012,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,  		return rc;  	if (boot_proto == ISCSI_BOOTPROTO_DHCP) { -		if (if_info.dhcp_state) { +		if (if_info->dhcp_state) {  			beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,  				    "BG_%d : DHCP Already Enabled\n");  			return 0; @@ -894,9 +1025,9 @@ int mgmt_set_ip(struct beiscsi_hba *phba,  				IP_V6_LEN : IP_V4_LEN;  	} else { -		if (if_info.dhcp_state) { +		if (if_info->dhcp_state) { -			memset(&if_info, 0, sizeof(if_info)); +			memset(if_info, 0, sizeof(*if_info));  			rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,  				OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,  				sizeof(*reldhcp)); @@ -919,8 +1050,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,  	}  	/* Delete the Static IP Set */ -	if (if_info.ip_addr.addr[0]) { -		rc = mgmt_static_ip_modify(phba, &if_info, ip_param, NULL, +	if (if_info->ip_addr.addr[0]) { +		rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL,  					   IP_ACTION_DEL);  		if (rc)  			return rc; @@ -966,7 +1097,7 @@ int mgmt_set_ip(struct beiscsi_hba *phba,  		return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);  	} else { -		return mgmt_static_ip_modify(phba, &if_info, ip_param, +		return mgmt_static_ip_modify(phba, if_info, ip_param,  					     subnet_param, IP_ACTION_ADD);  	} @@ -1031,27 +1162,64 @@ int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,  }  int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, -		     struct be_cmd_get_if_info_resp *if_info) +		     struct be_cmd_get_if_info_resp **if_info)  {  	struct be_cmd_get_if_info_req *req;  	struct be_dma_mem nonemb_cmd; +	uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);  	int rc;  	if (mgmt_get_all_if_id(phba))  		return -EIO; -	rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, -				 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, -				 sizeof(*if_info)); -	if (rc) -		return rc; +	do { +		rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, +					 OPCODE_COMMON_ISCSI_NTWK_GET_IF_INFO, +					 ioctl_size); +		if (rc) +			return rc; -	req = nonemb_cmd.va; -	req->interface_hndl = phba->interface_handle; -	req->ip_type = ip_type; +		req = nonemb_cmd.va; +		req->interface_hndl = phba->interface_handle; +		req->ip_type = ip_type; -	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, if_info, -				    sizeof(*if_info)); +		/* Allocate memory for if_info */ +		*if_info = kzalloc(ioctl_size, GFP_KERNEL); +		if (!*if_info) { +			beiscsi_log(phba, KERN_ERR, +				    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, +				    "BG_%d : Memory Allocation Failure\n"); + +				/* Free the DMA memory for the IOCTL issuing */ +				pci_free_consistent(phba->ctrl.pdev, +						    nonemb_cmd.size, +						    nonemb_cmd.va, +						    nonemb_cmd.dma); +				return -ENOMEM; +		} + +		rc =  mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, *if_info, +					   ioctl_size); + +		/* Check if the error is because of Insufficent_Buffer */ +		if (rc == -EAGAIN) { + +			/* Get the new memory size */ +			ioctl_size = ((struct be_cmd_resp_hdr *) +				      nonemb_cmd.va)->actual_resp_len; +			ioctl_size += sizeof(struct be_cmd_req_hdr); + +			/* Free the previous allocated DMA memory */ +			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, +					    nonemb_cmd.va, +					    nonemb_cmd.dma); + +			/* Free the virtual memory */ +			kfree(*if_info); +		} else +			break; +	} while (true); +	return rc;  }  int mgmt_get_nic_conf(struct beiscsi_hba *phba, @@ -1225,7 +1393,6 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,  {  	int rc;  	unsigned int tag; -	struct be_mcc_wrb 
*wrb = NULL;
 	tag = be_cmd_set_vlan(phba, vlan_tag);
 	if (!tag) {
@@ -1235,7 +1402,7 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
 		return -EBUSY;
 	}
-	rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+	rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
 	if (rc) {
 		beiscsi_log(phba, KERN_ERR,
 			    (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
@@ -1281,7 +1448,7 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
 }
 /**
- * beiscsi_active_cid_disp()- Display Sessions Active
+ * beiscsi_active_session_disp()- Display Sessions Active
  * @dev: ptr to device not used.
  * @attr: device attribute, not used.
  * @buf: contains formatted text Session Count
  *
@@ -1290,14 +1457,56 @@ beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr,
  * size of the formatted string
 **/
 ssize_t
-beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr,
+beiscsi_active_session_disp(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint16_t avlbl_cids = 0, ulp_num, len = 0, total_cids = 0;
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
+			avlbl_cids = BEISCSI_ULP_AVLBL_CID(phba, ulp_num);
+			total_cids = BEISCSI_GET_CID_COUNT(phba, ulp_num);
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num,
+					(total_cids - avlbl_cids));
+		} else
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num, 0);
+	}
+
+	return len;
+}
+
+/**
+ * beiscsi_free_session_disp()- Display Available Sessions
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text Session Count
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_free_session_disp(struct device *dev, struct device_attribute *attr,
+		       char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct beiscsi_hba *phba = iscsi_host_priv(shost);
+	uint16_t ulp_num, len = 0;
+
+	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
+		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported))
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num,
+					BEISCSI_ULP_AVLBL_CID(phba, ulp_num));
+		else
+			len += snprintf(buf+len, PAGE_SIZE - len,
+					"ULP%d : %d\n", ulp_num, 0);
+	}
-	return snprintf(buf, PAGE_SIZE, "%d\n",
-		       (phba->params.cxns_per_ctrl - phba->avlbl_cids));
+	return len;
 }
 /**
@@ -1338,6 +1547,25 @@ beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
 	}
 }
+/**
+ * beiscsi_phys_port_disp()- Display Physical Port Identifier
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used. 
+ * @buf: contains formatted text port identifier + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_phys_port_disp(struct device *dev, struct device_attribute *attr, +			 char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct beiscsi_hba *phba = iscsi_host_priv(shost); + +	return snprintf(buf, PAGE_SIZE, "Port Identifier : %d\n", +			phba->fw_config.phys_port); +}  void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,  			     struct wrb_handle *pwrb_handle, @@ -1411,10 +1639,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,  	memset(pwrb, 0, sizeof(*pwrb)); -	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, -		      max_burst_length, pwrb, params->dw[offsetof -		      (struct amap_beiscsi_offload_params, -		      max_burst_length) / 32]);  	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,  		      max_burst_length, pwrb, params->dw[offsetof  		      (struct amap_beiscsi_offload_params, @@ -1436,7 +1660,9 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,  		      params->dw[offsetof(struct amap_beiscsi_offload_params,  		      first_burst_length) / 32]);  	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, -		      max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN); +		      max_recv_dataseg_len, pwrb, +		      params->dw[offsetof(struct amap_beiscsi_offload_params, +		      max_recv_data_segment_length) / 32]);  	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,  		      max_cxns, pwrb, BEISCSI_MAX_CXNS);  	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb, diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h index 04af7e74fe4..24a8fc57747 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.h +++ b/drivers/scsi/be2iscsi/be_mgmt.h @@ -294,7 +294,7 @@ int mgmt_get_nic_conf(struct beiscsi_hba *phba,  		      struct be_cmd_get_nic_conf_resp *mac);  int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, -		     struct be_cmd_get_if_info_resp *if_info); +		     struct be_cmd_get_if_info_resp **if_info);  int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type,  		     struct be_cmd_get_def_gateway_resp *gateway); @@ -315,12 +315,19 @@ ssize_t beiscsi_drvr_ver_disp(struct device *dev,  ssize_t beiscsi_fw_ver_disp(struct device *dev,  			     struct device_attribute *attr, char *buf); -ssize_t beiscsi_active_cid_disp(struct device *dev, -				 struct device_attribute *attr, char *buf); +ssize_t beiscsi_active_session_disp(struct device *dev, +				     struct device_attribute *attr, char *buf);  ssize_t beiscsi_adap_family_disp(struct device *dev,  				  struct device_attribute *attr, char *buf); + +ssize_t beiscsi_free_session_disp(struct device *dev, +				   struct device_attribute *attr, char *buf); + +ssize_t beiscsi_phys_port_disp(struct device *dev, +				struct device_attribute *attr, char *buf); +  void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,  			     struct wrb_handle *pwrb_handle,  			     struct be_mem_descriptor *mem_descr); @@ -328,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,  void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,  			     struct wrb_handle *pwrb_handle);  void beiscsi_ue_detect(struct beiscsi_hba *phba); +int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, +			 struct be_set_eqd *, int num);  #endif diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index 520540a5fef..e3f67b097a5 100644 --- 
a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -1367,10 +1367,6 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,  	struct bfa_iocfc_s      *iocfc = &bfa->iocfc;  	bfa_status_t            status; -	iocfc->faa_args.faa_attr = attr; -	iocfc->faa_args.faa_cb.faa_cbfn = cbfn; -	iocfc->faa_args.faa_cb.faa_cbarg = cbarg; -  	status = bfa_faa_validate_request(bfa);  	if (status != BFA_STATUS_OK)  		return status; @@ -1378,6 +1374,10 @@ bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,  	if (iocfc->faa_args.busy == BFA_TRUE)  		return BFA_STATUS_DEVBUSY; +	iocfc->faa_args.faa_attr = attr; +	iocfc->faa_args.faa_cb.faa_cbfn = cbfn; +	iocfc->faa_args.faa_cb.faa_cbarg = cbarg; +  	iocfc->faa_args.busy = BFA_TRUE;  	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));  	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC, diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index d40a79f5265..877b86dd283 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h @@ -132,6 +132,7 @@ enum bfa_status {  	BFA_STATUS_ETIMER	= 5,	/*  Timer expired - Retry, if persists,  					 *  contact support */  	BFA_STATUS_EPROTOCOL	= 6,	/*  Protocol error */ +	BFA_STATUS_BADFLASH	= 9,	/*  Flash is bad */  	BFA_STATUS_SFP_UNSUPP	= 10,	/*  Unsupported SFP - Replace SFP */  	BFA_STATUS_UNKNOWN_VFID = 11,	/*  VF_ID not found */  	BFA_STATUS_DATACORRUPTED = 12,  /*  Diag returned data corrupted */ diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index 562ef739b0b..64069a0a3d0 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -1026,7 +1026,7 @@ struct fc_alpabm_s {  #define FC_ED_TOV	2  #define FC_REC_TOV	(FC_ED_TOV + 1)  #define FC_RA_TOV	10 -#define FC_ELS_TOV	((2 * FC_RA_TOV) + 1) +#define FC_ELS_TOV	(2 * FC_RA_TOV)  #define FC_FCCT_TOV	(3 * FC_RA_TOV)  /* diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index 94d5d0102f7..42bcb970445 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -296,6 +296,7 @@ wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,  struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,  					    u16 vf_id, wwn_t lpwwn); +void bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, char *symname);  void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,  			    struct bfa_lport_info_s *port_info);  void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port, diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index 2f61a5af365..ff75ef89175 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -773,7 +773,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport,  	bfa_trc(lport->fcs, fchs->type);  	if (!bfa_fcs_lport_is_online(lport)) { -		bfa_stats(lport, uf_recv_drops); +		/* +		 * In direct attach topology, it is possible to get a PLOGI +		 * before the lport is online due to port feature +		 * (QoS/Trunk/FEC/CR), so send a rjt +		 */ +		if ((fchs->type == FC_TYPE_ELS) && +			(els_cmd->els_code == FC_ELS_PLOGI)) { +			bfa_fcs_lport_send_ls_rjt(lport, fchs, +				FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD, +				FC_LS_RJT_EXP_NO_ADDL_INFO); +			bfa_stats(lport, plogi_rcvd); +		} else +			bfa_stats(lport, uf_recv_drops); +  		return;  	} @@ -1097,6 +1110,17 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,  	bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);  } +void +bfa_fcs_lport_set_symname(struct bfa_fcs_lport_s *port, +				char *symname) +{ +	
strcpy(port->port_cfg.sym_name.symname, symname); + +	if (bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) +		bfa_fcs_lport_ns_util_send_rspn_id( +			BFA_FCS_GET_NS_FROM_PORT(port), NULL); +} +  /*   *  fcs_lport_api   */ @@ -5140,9 +5164,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)  	u8 *psymbl = &symbl[0];  	int len; -	if (!bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online)) -		return; -  	/* Avoid sending RSPN in the following states. */  	if (bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_offline) ||  	    bfa_sm_cmp_state(ns, bfa_fcs_lport_ns_sm_plogi_sending) || diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index f78bcb6696b..315d6d6dcfc 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -21,6 +21,7 @@  #include "bfi_reg.h"  #include "bfa_defs.h"  #include "bfa_defs_svc.h" +#include "bfi.h"  BFA_TRC_FILE(CNA, IOC); @@ -45,6 +46,14 @@ BFA_TRC_FILE(CNA, IOC);  #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) +#define bfa_ioc_state_disabled(__sm)		\ +	(((__sm) == BFI_IOC_UNINIT) ||		\ +	((__sm) == BFI_IOC_INITING) ||		\ +	((__sm) == BFI_IOC_HWINIT) ||		\ +	((__sm) == BFI_IOC_DISABLED) ||		\ +	((__sm) == BFI_IOC_FAIL) ||		\ +	((__sm) == BFI_IOC_CFG_DISABLED)) +  /*   * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.   */ @@ -102,6 +111,12 @@ static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);  static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);  static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);  static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); +static enum bfi_ioc_img_ver_cmp_e bfa_ioc_fw_ver_patch_cmp( +				struct bfi_ioc_image_hdr_s *base_fwhdr, +				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp); +static enum bfi_ioc_img_ver_cmp_e bfa_ioc_flash_fwver_cmp( +				struct bfa_ioc_s *ioc, +				struct bfi_ioc_image_hdr_s *base_fwhdr);  /*   * IOC state machine definitions/declarations @@ -1454,28 +1469,42 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)  }  /* - * Returns TRUE if same. + * Returns TRUE if driver is willing to work with current smem f/w version.   */  bfa_boolean_t -bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) +bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, +		struct bfi_ioc_image_hdr_s *smem_fwhdr)  {  	struct bfi_ioc_image_hdr_s *drv_fwhdr; -	int i; +	enum bfi_ioc_img_ver_cmp_e smem_flash_cmp, drv_smem_cmp;  	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)  		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); -	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { -		if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) { -			bfa_trc(ioc, i); -			bfa_trc(ioc, fwhdr->md5sum[i]); -			bfa_trc(ioc, drv_fwhdr->md5sum[i]); -			return BFA_FALSE; -		} +	/* +	 * If smem is incompatible or old, driver should not work with it. +	 */ +	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, smem_fwhdr); +	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || +		drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { +		return BFA_FALSE;  	} -	bfa_trc(ioc, fwhdr->md5sum[0]); -	return BFA_TRUE; +	/* +	 * IF Flash has a better F/W than smem do not work with smem. +	 * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it. +	 * If Flash is old or incomp work with smem iff smem f/w == drv f/w. 
+	 */
+	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, smem_fwhdr);
+
+	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) {
+		return BFA_FALSE;
+	} else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) {
+		return BFA_TRUE;
+	} else {
+		return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
+			BFA_TRUE : BFA_FALSE;
+	}
 }
 
 /*
@@ -1485,17 +1514,9 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 static bfa_boolean_t
 bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 {
-	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
+	struct bfi_ioc_image_hdr_s fwhdr;
 
 	bfa_ioc_fwver_get(ioc, &fwhdr);
-	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
-		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
-
-	if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
-		bfa_trc(ioc, fwhdr.signature);
-		bfa_trc(ioc, drv_fwhdr->signature);
-		return BFA_FALSE;
-	}
 
 	if (swab32(fwhdr.bootenv) != boot_env) {
 		bfa_trc(ioc, fwhdr.bootenv);
@@ -1506,6 +1527,168 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
 	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
 }
 
+static bfa_boolean_t
+bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr_s *fwhdr_1,
+				struct bfi_ioc_image_hdr_s *fwhdr_2)
+{
+	int i;
+
+	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++)
+		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
+			return BFA_FALSE;
+
+	return BFA_TRUE;
+}
+
+/*
+ * Returns TRUE if major, minor and maintenance versions are the same.
+ * If the patch versions are also the same, the MD5 checksums must match.
+ */
+static bfa_boolean_t
+bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr_s *drv_fwhdr,
+				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
+		return BFA_FALSE;
+
+	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
+		drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
+		drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) {
+		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
+	}
+
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr_s *flash_fwhdr)
+{
+	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
+		return BFA_FALSE;
+
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t fwhdr_is_ga(struct bfi_ioc_image_hdr_s *fwhdr)
+{
+	if (fwhdr->fwver.phase == 0 &&
+		fwhdr->fwver.build == 0)
+		return BFA_TRUE;
+
+	return BFA_FALSE;
+}
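Taken together, the new bfa_ioc_fwver_cmp() above reduces to a small accept/reject policy over three firmware images: the driver's bundled image, the image already loaded in shared memory (smem), and the image on flash. A condensed user-space sketch of just that policy; the enum, names and helper below are illustrative stand-ins, not driver code:

```c
#include <stdio.h>

enum cmp { INCOMP, OLD, SAME, BETTER };	/* how the second image compares to the first */

/* Policy from bfa_ioc_fwver_cmp(): is the driver willing to run with the
 * firmware currently in smem?  smem_rel_drv is how smem compares to the
 * driver's image, flash_rel_smem is how flash compares to smem. */
static int accept_smem(enum cmp smem_rel_drv, enum cmp flash_rel_smem)
{
	/* smem older than or incompatible with the driver: never. */
	if (smem_rel_drv == INCOMP || smem_rel_drv == OLD)
		return 0;
	/* flash carries something better: reboot from flash instead. */
	if (flash_rel_smem == BETTER)
		return 0;
	/* flash == smem: fine, smem is neither old nor incompatible. */
	if (flash_rel_smem == SAME)
		return 1;
	/* flash old or invalid: accept smem only if it matches the driver. */
	return smem_rel_drv == SAME;
}

int main(void)
{
	printf("%d %d %d\n",
	       accept_smem(SAME, SAME),		/* 1: everything agrees */
	       accept_smem(SAME, BETTER),	/* 0: flash image wins */
	       accept_smem(BETTER, INCOMP));	/* 0: smem newer, but flash invalid */
	return 0;
}
```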
+
+/*
+ * Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better.
+ */
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr_s *base_fwhdr,
+				struct bfi_ioc_image_hdr_s *fwhdr_to_cmp)
+{
+	if (bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp) == BFA_FALSE)
+		return BFI_IOC_IMG_VER_INCOMP;
+
+	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
+		return BFI_IOC_IMG_VER_BETTER;
+
+	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
+		return BFI_IOC_IMG_VER_OLD;
+
+	/*
+	 * GA takes priority over internal builds of the same patch stream.
+	 * At this point major, minor, maint and patch numbers are the same.
+	 */
+
+	if (fwhdr_is_ga(base_fwhdr) == BFA_TRUE) {
+		if (fwhdr_is_ga(fwhdr_to_cmp))
+			return BFI_IOC_IMG_VER_SAME;
+		else
+			return BFI_IOC_IMG_VER_OLD;
+	} else {
+		if (fwhdr_is_ga(fwhdr_to_cmp))
+			return BFI_IOC_IMG_VER_BETTER;
+	}
+
+	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
+		return BFI_IOC_IMG_VER_BETTER;
+	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
+		return BFI_IOC_IMG_VER_OLD;
+
+	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
+		return BFI_IOC_IMG_VER_BETTER;
+	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
+		return BFI_IOC_IMG_VER_OLD;
+
+	/*
+	 * All version numbers are equal; the MD5 checksums were already
+	 * compared as part of the compatibility check.
+	 */
+	return BFI_IOC_IMG_VER_SAME;
+}
+
+#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */
+
+bfa_status_t
+bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off,
+				u32 *fwimg)
+{
+	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
+			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
+			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
+}
+
+static enum bfi_ioc_img_ver_cmp_e
+bfa_ioc_flash_fwver_cmp(struct bfa_ioc_s *ioc,
+			struct bfi_ioc_image_hdr_s *base_fwhdr)
+{
+	struct bfi_ioc_image_hdr_s *flash_fwhdr;
+	bfa_status_t status;
+	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
+
+	status = bfa_ioc_flash_img_get_chnk(ioc, 0, fwimg);
+	if (status != BFA_STATUS_OK)
+		return BFI_IOC_IMG_VER_INCOMP;
+
+	flash_fwhdr = (struct bfi_ioc_image_hdr_s *) fwimg;
+	if (bfa_ioc_flash_fwver_valid(flash_fwhdr) == BFA_TRUE)
+		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
+	else
+		return BFI_IOC_IMG_VER_INCOMP;
+}
+
+
+/*
+ * Invalidate fwver signature
+ */
+bfa_status_t
+bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc)
+{
+
+	u32	pgnum, pgoff;
+	u32	loff = 0;
+	enum bfi_ioc_state ioc_fwstate;
+
+	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
+	if (!bfa_ioc_state_disabled(ioc_fwstate))
+		return BFA_STATUS_ADAPTER_ENABLED;
+
+	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
+	pgoff = PSS_SMEM_PGOFF(loff);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, BFA_IOC_FW_INV_SIGN);
+
+	return BFA_STATUS_OK;
+}
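The helpers ending here impose a strict ordering on image headers: major, minor and maint must match exactly for two images to be comparable at all (the driver also checks the signature), a higher patch always wins, a GA build (phase == 0 and build == 0) outranks any internal build of the same patch, phase and build break remaining ties, and a full tie falls back to the MD5 sums. A standalone sketch of that precedence, assuming illustrative types rather than the bfi structures:

```c
#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Illustrative stand-in for the bfi_ioc_fwver_s fields used above. */
struct fwver {
	uint8_t major, minor, maint, patch;
	uint8_t phase, build;		/* 0/0 marks a GA build */
	uint8_t md5[16];
};

enum ver_cmp { VER_INCOMP, VER_OLD, VER_SAME, VER_BETTER };

/* Same precedence as bfa_ioc_fw_ver_patch_cmp(): exact match on
 * major/minor/maint, then patch decides, then GA outranks internal
 * builds of the same patch, then phase, then build; a full tie is
 * settled by the MD5 comparison. */
static enum ver_cmp fw_ver_cmp(const struct fwver *base, const struct fwver *cmp)
{
	if (cmp->major != base->major || cmp->minor != base->minor ||
	    cmp->maint != base->maint)
		return VER_INCOMP;
	if (cmp->patch != base->patch)
		return cmp->patch > base->patch ? VER_BETTER : VER_OLD;

	if (base->phase == 0 && base->build == 0)	/* base is GA */
		return (cmp->phase == 0 && cmp->build == 0) ? VER_SAME : VER_OLD;
	if (cmp->phase == 0 && cmp->build == 0)		/* only cmp is GA */
		return VER_BETTER;

	if (cmp->phase != base->phase)
		return cmp->phase > base->phase ? VER_BETTER : VER_OLD;
	if (cmp->build != base->build)
		return cmp->build > base->build ? VER_BETTER : VER_OLD;

	return memcmp(base->md5, cmp->md5, sizeof(base->md5)) == 0 ?
		VER_SAME : VER_INCOMP;
}

int main(void)
{
	struct fwver drv  = { 3, 2, 3, 0, 0, 0, {0} };	/* 3.2.3.0 GA */
	struct fwver flsh = { 3, 2, 3, 1, 1, 7, {0} };	/* 3.2.3.1 internal */

	/* A higher patch wins even though the flash image is not GA. */
	printf("%d\n", fw_ver_cmp(&drv, &flsh) == VER_BETTER);
	return 0;
}
```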
 
 /*
  * Conditionally flush any pending message from firmware at start.
  */
@@ -1544,8 +1727,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
 
 	if (!fwvalid) {
-		bfa_ioc_boot(ioc, boot_type, boot_env);
-		bfa_ioc_poll_fwinit(ioc);
+		if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+			bfa_ioc_poll_fwinit(ioc);
 		return;
 	}
 
@@ -1580,8 +1763,8 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
 	/*
 	 * Initialize the h/w for any other states.
 	 */
-	bfa_ioc_boot(ioc, boot_type, boot_env);
-	bfa_ioc_poll_fwinit(ioc);
+	if (bfa_ioc_boot(ioc, boot_type, boot_env) == BFA_STATUS_OK)
+		bfa_ioc_poll_fwinit(ioc);
 }
 
 static void
@@ -1684,7 +1867,7 @@ bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
 
 /*
  *	Initiate a full firmware download.
  */
-static void
+static bfa_status_t
 bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 		    u32 boot_env)
 {
@@ -1694,28 +1877,60 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 	u32 chunkno = 0;
 	u32 i;
 	u32 asicmode;
+	u32 fwimg_size;
+	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
+	bfa_status_t status;
+
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+		boot_type == BFI_FWBOOT_TYPE_FLASH) {
+		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
+
+		status = bfa_ioc_flash_img_get_chnk(ioc,
+			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
+		if (status != BFA_STATUS_OK)
+			return status;
+
+		fwimg = fwimg_buf;
+	} else {
+		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
+		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+	}
+
+	bfa_trc(ioc, fwimg_size);
 
-	bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
-	fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
 	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
 	pgoff = PSS_SMEM_PGOFF(loff);
 
 	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
-	for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
+	for (i = 0; i < fwimg_size; i++) {
 
 		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
 			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
-			fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
+
+			if (boot_env == BFI_FWBOOT_ENV_OS &&
+				boot_type == BFI_FWBOOT_TYPE_FLASH) {
+				status = bfa_ioc_flash_img_get_chnk(ioc,
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
+					fwimg_buf);
+				if (status != BFA_STATUS_OK)
+					return status;
+
+				fwimg = fwimg_buf;
+			} else {
+				fwimg = bfa_cb_image_get_chunk(
+					bfa_ioc_asic_gen(ioc),
 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
+			}
 		}
 
 		/*
 		 * write smem
 		 */
 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
-			cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
+			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
 
 		loff += sizeof(u32);
@@ -1733,8 +1948,12 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 			ioc->ioc_regs.host_page_num_fn);
 
 	/*
-	 * Set boot type and device mode at the end.
+	 * Set boot type, env and device mode at the end.
 	 */
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+		boot_type == BFI_FWBOOT_TYPE_FLASH) {
+		boot_type = BFI_FWBOOT_TYPE_NORMAL;
+	}
 	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
 				ioc->port0_mode, ioc->port1_mode);
 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
@@ -1743,6 +1962,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
 			swab32(boot_type));
 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
 			swab32(boot_env));
+	return BFA_STATUS_OK;
 }
 
 
@@ -2002,13 +2222,30 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
  * Interface used by diag module to do firmware boot with memory test
  * as the entry vector.
  */
-void
+bfa_status_t
 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 {
+	struct bfi_ioc_image_hdr_s *drv_fwhdr;
+	bfa_status_t status;
 	bfa_ioc_stats(ioc, ioc_boots);
 
 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
-		return;
+		return BFA_STATUS_FAILED;
+
+	if (boot_env == BFI_FWBOOT_ENV_OS &&
+		boot_type == BFI_FWBOOT_TYPE_NORMAL) {
+
+		drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
+			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
+
+		/*
+		 * Work with Flash iff flash f/w is better than driver f/w.
+		 * Otherwise push the driver's firmware.
+		 */ +		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == +						BFI_IOC_IMG_VER_BETTER) +			boot_type = BFI_FWBOOT_TYPE_FLASH; +	}  	/*  	 * Initialize IOC state of all functions on a chip reset. @@ -2022,8 +2259,14 @@ bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)  	}  	bfa_ioc_msgflush(ioc); -	bfa_ioc_download_fw(ioc, boot_type, boot_env); -	bfa_ioc_lpu_start(ioc); +	status = bfa_ioc_download_fw(ioc, boot_type, boot_env); +	if (status == BFA_STATUS_OK) +		bfa_ioc_lpu_start(ioc); +	else { +		WARN_ON(boot_type == BFI_FWBOOT_TYPE_MEMTEST); +		bfa_iocpf_timeout(ioc); +	} +	return status;  }  /* @@ -2419,14 +2662,6 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)  		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);  } -#define bfa_ioc_state_disabled(__sm)		\ -	(((__sm) == BFI_IOC_UNINIT) ||		\ -	 ((__sm) == BFI_IOC_INITING) ||		\ -	 ((__sm) == BFI_IOC_HWINIT) ||		\ -	 ((__sm) == BFI_IOC_DISABLED) ||	\ -	 ((__sm) == BFI_IOC_FAIL) ||		\ -	 ((__sm) == BFI_IOC_CFG_DISABLED)) -  /*   * Check if adapter is disabled -- both IOCs should be in a disabled   * state. @@ -3643,7 +3878,7 @@ bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)  		bfa_trc(sfp, sfp->data_valid);  		if (sfp->data_valid) {  			u32	size = sizeof(struct sfp_mem_s); -			u8 *des = (u8 *) &(sfp->sfpmem->srlid_base); +			u8 *des = (u8 *) &(sfp->sfpmem);  			memcpy(des, sfp->dbuf_kva, size);  		}  		/* @@ -6423,3 +6658,408 @@ bfa_fru_intr(void *fruarg, struct bfi_mbmsg_s *msg)  		WARN_ON(1);  	}  } + +/* + * register definitions + */ +#define FLI_CMD_REG			0x0001d000 +#define FLI_RDDATA_REG			0x0001d010 +#define FLI_ADDR_REG			0x0001d004 +#define FLI_DEV_STATUS_REG		0x0001d014 + +#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */ +#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */ +#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */ +#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */ + +enum bfa_flash_cmd { +	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */ +	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */ +}; + +/** + * @brief hardware error definition + */ +enum bfa_flash_err { +	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */ +	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */ +	BFA_FLASH_BAD		= -3,	/*!< flash bad */ +	BFA_FLASH_BUSY		= -4,	/*!< flash busy */ +	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */ +	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */ +	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */ +	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */ +	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */ +}; + +/** + * @brief flash command register data structure + */ +union bfa_flash_cmd_reg_u { +	struct { +#ifdef __BIG_ENDIAN +		u32	act:1; +		u32	rsv:1; +		u32	write_cnt:9; +		u32	read_cnt:9; +		u32	addr_cnt:4; +		u32	cmd:8; +#else +		u32	cmd:8; +		u32	addr_cnt:4; +		u32	read_cnt:9; +		u32	write_cnt:9; +		u32	rsv:1; +		u32	act:1; +#endif +	} r; +	u32	i; +}; + +/** + * @brief flash device status register data structure + */ +union bfa_flash_dev_status_reg_u { +	struct { +#ifdef __BIG_ENDIAN +		u32	rsv:21; +		u32	fifo_cnt:6; +		u32	busy:1; +		u32	init_status:1; +		u32	present:1; +		u32	bad:1; +		u32	good:1; +#else +		u32	good:1; +		u32	bad:1; +		u32	present:1; +		u32	init_status:1; +		u32	busy:1; +		u32	fifo_cnt:6; +		u32	rsv:21; +#endif +	} r; +	u32	i; +}; + +/** + * @brief flash address register data structure + */ +union bfa_flash_addr_reg_u { +	struct { +#ifdef __BIG_ENDIAN +		u32	addr:24; +		u32	
dummy:8;
+#else
+		u32	dummy:8;
+		u32	addr:24;
+#endif
+	} r;
+	u32	i;
+};
+
+/**
+ * Flash raw private functions
+ */
+static void
+bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
+		  u8 rd_cnt, u8 ad_cnt, u8 op)
+{
+	union bfa_flash_cmd_reg_u cmd;
+
+	cmd.i = 0;
+	cmd.r.act = 1;
+	cmd.r.write_cnt = wr_cnt;
+	cmd.r.read_cnt = rd_cnt;
+	cmd.r.addr_cnt = ad_cnt;
+	cmd.r.cmd = op;
+	writel(cmd.i, (pci_bar + FLI_CMD_REG));
+}
+
+static void
+bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
+{
+	union bfa_flash_addr_reg_u addr;
+
+	addr.r.addr = address & 0x00ffffff;
+	addr.r.dummy = 0;
+	writel(addr.i, (pci_bar + FLI_ADDR_REG));
+}
+
+static int
+bfa_flash_cmd_act_check(void __iomem *pci_bar)
+{
+	union bfa_flash_cmd_reg_u cmd;
+
+	cmd.i = readl(pci_bar + FLI_CMD_REG);
+
+	if (cmd.r.act)
+		return BFA_FLASH_ERR_CMD_ACT;
+
+	return 0;
+}
+
+/**
+ * @brief
+ * Flush FLI data fifo.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static u32
+bfa_flash_fifo_flush(void __iomem *pci_bar)
+{
+	u32 i;
+	u32 t;
+	union bfa_flash_dev_status_reg_u dev_status;
+
+	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+
+	if (!dev_status.r.fifo_cnt)
+		return 0;
+
+	/* fifo counter in terms of words */
+	for (i = 0; i < dev_status.r.fifo_cnt; i++)
+		t = readl(pci_bar + FLI_RDDATA_REG);
+
+	/*
+	 * Check the device status. It may take some time.
+	 */
+	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+		if (!dev_status.r.fifo_cnt)
+			break;
+	}
+
+	if (dev_status.r.fifo_cnt)
+		return BFA_FLASH_ERR_FIFO_CNT;
+
+	return 0;
+}
+
+/**
+ * @brief
+ * Read flash status.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static u32
+bfa_flash_status_read(void __iomem *pci_bar)
+{
+	union bfa_flash_dev_status_reg_u	dev_status;
+	int				status;
+	u32			ret_status;
+	int				i;
+
+	status = bfa_flash_fifo_flush(pci_bar);
+	if (status < 0)
+		return status;
+
+	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
+
+	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
+		status = bfa_flash_cmd_act_check(pci_bar);
+		if (!status)
+			break;
+	}
+
+	if (status)
+		return status;
+
+	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
+	if (!dev_status.r.fifo_cnt)
+		return BFA_FLASH_BUSY;
+
+	ret_status = readl(pci_bar + FLI_RDDATA_REG);
+	ret_status >>= 24;
+
+	status = bfa_flash_fifo_flush(pci_bar);
+	if (status < 0)
+		return status;
+
+	return ret_status;
+}
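Every FLI accessor above funnels register traffic through unions such as bfa_flash_cmd_reg_u: a raw u32 view for readl()/writel() plus a bitfield view for the individual fields, with two #ifdef __BIG_ENDIAN layouts because bitfield order is compiler- and target-dependent. A minimal user-space sketch of that overlay and of the bounded polling used with BFA_FLASH_CHECK_MAX (fake register, typical little-endian layout assumed):

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative overlay in the style of bfa_flash_cmd_reg_u: a bitfield
 * view and a raw 32-bit view of the same register.  Bitfield order is
 * compiler- and endian-dependent, which is why the driver keeps two
 * #ifdef __BIG_ENDIAN layouts; the little-endian one is shown here. */
union cmd_reg {
	struct {
		uint32_t cmd:8;
		uint32_t addr_cnt:4;
		uint32_t read_cnt:9;
		uint32_t write_cnt:9;
		uint32_t rsv:1;
		uint32_t act:1;
	} r;
	uint32_t i;
};

/* Fake device register standing in for readl()/writel() on FLI_CMD_REG. */
static uint32_t fake_reg;
static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

/* Bounded busy-wait on the 'act' bit, like the BFA_FLASH_CHECK_MAX loops
 * above: poll at most max_checks times, never spin forever. */
static int wait_cmd_idle(int max_checks)
{
	union cmd_reg c;

	while (max_checks--) {
		c.i = reg_read();
		if (!c.r.act)
			return 0;
	}
	return -1;	/* command active never cleared */
}

int main(void)
{
	union cmd_reg c = { .r = { .cmd = 0x0b, .read_cnt = 64, .act = 1 } };

	reg_write(c.i);			/* start a fast read */
	fake_reg &= ~(1u << 31);	/* pretend hardware finished (act is bit 31 in this layout) */
	printf("idle: %d\n", wait_cmd_idle(10000) == 0);
	return 0;
}
```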
+
+/**
+ * @brief
+ * Start flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash address offset
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ * Return 0 on success, negative error number on error.
+ */
+static u32
+bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
+			 char *buf)
+{
+	int status;
+
+	/*
+	 * len must be a multiple of 4 and must not exceed the fifo size
+	 */
+	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
+		return BFA_FLASH_ERR_LEN;
+
+	/*
+	 * check status
+	 */
+	status = bfa_flash_status_read(pci_bar);
+	if (status == BFA_FLASH_BUSY)
+		status = bfa_flash_status_read(pci_bar);
+
+	if (status < 0)
+		return status;
+
+	/*
+	 * check if write-in-progress bit is cleared
+	 */
+	if (status & BFA_FLASH_WIP_MASK)
+		return BFA_FLASH_ERR_WIP;
+
+	bfa_flash_set_addr(pci_bar, offset);
+
+	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
+
+	return 0;
+}
+
+/**
+ * @brief
+ * Check flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ *
+ * Return flash device status, 1 if busy, 0 if not.
+ */
+static u32
+bfa_flash_read_check(void __iomem *pci_bar)
+{
+	if (bfa_flash_cmd_act_check(pci_bar))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * @brief
+ * End flash read operation.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] len - read data length
+ * @param[in] buf - read data buffer
+ *
+ */
+static void
+bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
+{
+
+	u32 i;
+
+	/*
+	 * read data fifo up to 32 words
+	 */
+	for (i = 0; i < len; i += 4) {
+		u32 w = readl(pci_bar + FLI_RDDATA_REG);
+		*((u32 *) (buf + i)) = swab32(w);
+	}
+
+	bfa_flash_fifo_flush(pci_bar);
+}
+
+/**
+ * @brief
+ * Perform flash raw read.
+ *
+ * @param[in] pci_bar - pci bar address
+ * @param[in] offset - flash partition address offset
+ * @param[in] buf - read data buffer
+ * @param[in] len - read data length
+ *
+ * Return status.
+ */
+
+
+#define FLASH_BLOCKING_OP_MAX   500
+#define FLASH_SEM_LOCK_REG	0x18820
+
+static int
+bfa_raw_sem_get(void __iomem *bar)
+{
+	int	locked;
+
+	locked = readl((bar + FLASH_SEM_LOCK_REG));
+	return !locked;
+
+}
+
+bfa_status_t
+bfa_flash_sem_get(void __iomem *bar)
+{
+	u32 n = FLASH_BLOCKING_OP_MAX;
+
+	while (!bfa_raw_sem_get(bar)) {
+		if (--n <= 0)
+			return BFA_STATUS_BADFLASH;
+		mdelay(10);
+	}
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_flash_sem_put(void __iomem *bar)
+{
+	writel(0, (bar + FLASH_SEM_LOCK_REG));
+}
+
+bfa_status_t
+bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
+		       u32 len)
+{
+	u32 n;
+	int status;
+	u32 off, l, s, residue, fifo_sz;
+
+	residue = len;
+	off = 0;
+	fifo_sz = BFA_FLASH_FIFO_SIZE;
+	status = bfa_flash_sem_get(pci_bar);
+	if (status != BFA_STATUS_OK)
+		return status;
+
+	while (residue) {
+		s = offset + off;
+		n = s / fifo_sz;
+		l = (n + 1) * fifo_sz - s;
+		if (l > residue)
+			l = residue;
+
+		status = bfa_flash_read_start(pci_bar, offset + off, l,
+								&buf[off]);
+		if (status < 0) {
+			bfa_flash_sem_put(pci_bar);
+			return BFA_STATUS_FAILED;
+		}
+
+		n = BFA_FLASH_BLOCKING_OP_MAX;
+		while (bfa_flash_read_check(pci_bar)) {
+			if (--n <= 0) {
+				bfa_flash_sem_put(pci_bar);
+				return BFA_STATUS_FAILED;
+			}
+		}
+
+		bfa_flash_read_end(pci_bar, l, &buf[off]);
+
+		residue -= l;
+		off += l;
+	}
+	bfa_flash_sem_put(pci_bar);
+
+	return BFA_STATUS_OK;
+}
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 90814fe85ac..2e28392c2fb 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -515,6 +515,8 @@ void bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
 		void *dev, struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg);
 void bfa_flash_memclaim(struct 
bfa_flash_s *flash,  		u8 *dm_kva, u64 dm_pa, bfa_boolean_t mincfg); +bfa_status_t    bfa_flash_raw_read(void __iomem *pci_bar_kva, +				u32 offset, char *buf, u32 len);  /*   *	DIAG module specific @@ -888,7 +890,7 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc);  void bfa_ioc_disable(struct bfa_ioc_s *ioc);  bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc); -void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, +bfa_status_t bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,  		u32 boot_env);  void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);  void bfa_ioc_error_isr(struct bfa_ioc_s *ioc); @@ -919,6 +921,7 @@ bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,  				 int *trclen);  bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,  	u32 *offset, int *buflen); +bfa_status_t bfa_ioc_fwsig_invalidate(struct bfa_ioc_s *ioc);  bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);  void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,  			struct bfi_ioc_image_hdr_s *fwhdr); @@ -956,6 +959,8 @@ bfa_status_t bfa_ablk_optrom_en(struct bfa_ablk_s *ablk,  bfa_status_t bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk,  		bfa_ablk_cbfn_t cbfn, void *cbarg); +bfa_status_t bfa_ioc_flash_img_get_chnk(struct bfa_ioc_s *ioc, u32 off, +				u32 *fwimg);  /*   * bfa mfg wwn API functions   */ diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index e3b92874667..453c2f5b556 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -81,6 +81,29 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)  static bfa_boolean_t  bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)  { +	enum bfi_ioc_state alt_fwstate, cur_fwstate; +	struct bfi_ioc_image_hdr_s fwhdr; + +	cur_fwstate = bfa_ioc_cb_get_cur_ioc_fwstate(ioc); +	bfa_trc(ioc, cur_fwstate); +	alt_fwstate = bfa_ioc_cb_get_alt_ioc_fwstate(ioc); +	bfa_trc(ioc, alt_fwstate); + +	/* +	 * Uninit implies this is the only driver as of now. 
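One detail of bfa_flash_raw_read() above is worth a worked example: the per-pass length l = (n + 1) * fifo_sz - s is the distance from the current flash address to the next BFA_FLASH_FIFO_SIZE (128-byte) boundary, so no single read ever crosses the FIFO. A tiny sketch with illustrative numbers:

```c
#include <stdio.h>

#define FIFO_SZ 128	/* BFA_FLASH_FIFO_SIZE */

int main(void)
{
	unsigned offset = 300, len = 400;	/* arbitrary example read */
	unsigned off = 0, residue = len;

	while (residue) {
		unsigned s = offset + off;		/* absolute flash address */
		unsigned n = s / FIFO_SZ;		/* index of the 128-byte block */
		unsigned l = (n + 1) * FIFO_SZ - s;	/* room left in this block */

		if (l > residue)
			l = residue;
		/* offset 300, len 400 yields chunks of 84, 128, 128 and 60 bytes */
		printf("read %u bytes at %u\n", l, s);
		residue -= l;
		off += l;
	}
	return 0;
}
```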
+	 */ +	if (cur_fwstate == BFI_IOC_UNINIT) +		return BFA_TRUE; +	/* +	 * Check if another driver with a different firmware is active +	 */ +	bfa_ioc_fwver_get(ioc, &fwhdr); +	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr) && +		alt_fwstate != BFI_IOC_DISABLED) { +		bfa_trc(ioc, alt_fwstate); +		return BFA_FALSE; +	} +  	return BFA_TRUE;  } diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index 6c41e57fd75..625225f3108 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -6758,7 +6758,7 @@ bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)  		dport->rp_pwwn = msg->info.teststart.pwwn;  		dport->rp_nwwn = msg->info.teststart.nwwn;  		dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm); -		bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO); +		bfa_dport_result_start(dport, msg->info.teststart.mode);  		break;  	case BFI_DPORT_SCN_SUBTESTSTART: diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index f8ca7becacc..7593b7c1d33 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -63,9 +63,9 @@ int		max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;  u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;  u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2; -#define BFAD_FW_FILE_CB		"cbfw-3.2.1.1.bin" -#define BFAD_FW_FILE_CT		"ctfw-3.2.1.1.bin" -#define BFAD_FW_FILE_CT2	"ct2fw-3.2.1.1.bin" +#define BFAD_FW_FILE_CB		"cbfw-3.2.3.0.bin" +#define BFAD_FW_FILE_CT		"ctfw-3.2.3.0.bin" +#define BFAD_FW_FILE_CT2	"ct2fw-3.2.3.0.bin"  static u32 *bfad_load_fwimg(struct pci_dev *pdev);  static void bfad_free_fwimg(void); @@ -204,6 +204,7 @@ static void  bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)  {  	unsigned long flags; +	bfa_status_t ret;  	bfa_trc(bfad, event); @@ -217,7 +218,7 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)  		if (bfad_setup_intr(bfad)) {  			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",  					bfad->inst_no); -			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED); +			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);  			break;  		} @@ -242,8 +243,26 @@ bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)  			printk(KERN_WARNING  				"bfa %s: bfa init failed\n",  				bfad->pci_name); +			spin_lock_irqsave(&bfad->bfad_lock, flags); +			bfa_fcs_init(&bfad->bfa_fcs); +			spin_unlock_irqrestore(&bfad->bfad_lock, flags); + +			ret = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); +			if (ret != BFA_STATUS_OK) { +				init_completion(&bfad->comp); + +				spin_lock_irqsave(&bfad->bfad_lock, flags); +				bfad->pport.flags |= BFAD_PORT_DELETE; +				bfa_fcs_exit(&bfad->bfa_fcs); +				spin_unlock_irqrestore(&bfad->bfad_lock, flags); + +				wait_for_completion(&bfad->comp); + +				bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); +				break; +			}  			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL; -			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED); +			bfa_sm_send_event(bfad, BFAD_E_HAL_INIT_FAILED);  		}  		break; @@ -273,12 +292,14 @@ bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)  		spin_unlock_irqrestore(&bfad->bfad_lock, flags);  		retval = bfad_start_ops(bfad); -		if (retval != BFA_STATUS_OK) +		if (retval != BFA_STATUS_OK) { +			bfa_sm_set_state(bfad, bfad_sm_failed);  			break; +		}  		bfa_sm_set_state(bfad, bfad_sm_operational);  		break; -	case BFAD_E_INTR_INIT_FAILED: +	case BFAD_E_INIT_FAILED:  		bfa_sm_set_state(bfad, bfad_sm_uninit);  		kthread_stop(bfad->bfad_tsk);  		spin_lock_irqsave(&bfad->bfad_lock, flags); @@ -286,7 +307,7 @@ 
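The bfad_sm_* handlers being reworked above belong to a hand-rolled state machine: every state is a function, bfa_sm_set_state() swaps the current handler, and bfa_sm_send_event() dispatches an event to whichever state is active. A stripped-down sketch of the idiom; the states and events are illustrative, not bfad's:

```c
#include <stdio.h>

/* Each state is a handler; sending an event dispatches to it, and the
 * handler may switch the current state, as bfad_sm_created() does. */
enum event { E_INIT, E_INIT_SUCCESS, E_INIT_FAILED };

struct sm;
typedef void (*state_fn)(struct sm *, enum event);

struct sm { state_fn state; };

static void sm_set_state(struct sm *sm, state_fn s) { sm->state = s; }
static void sm_send_event(struct sm *sm, enum event e) { sm->state(sm, e); }

static void sm_uninit(struct sm *sm, enum event e);
static void sm_operational(struct sm *sm, enum event e);

static void sm_created(struct sm *sm, enum event e)
{
	switch (e) {
	case E_INIT_SUCCESS:
		sm_set_state(sm, sm_operational);
		break;
	case E_INIT_FAILED:
		sm_set_state(sm, sm_uninit);	/* undo setup and bail out */
		break;
	default:
		break;
	}
}

static void sm_uninit(struct sm *sm, enum event e)      { (void)sm; (void)e; }
static void sm_operational(struct sm *sm, enum event e) { (void)sm; (void)e; }

int main(void)
{
	struct sm sm = { sm_created };

	sm_send_event(&sm, E_INIT_SUCCESS);
	printf("operational: %d\n", sm.state == sm_operational);
	return 0;
}
```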
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)  		spin_unlock_irqrestore(&bfad->bfad_lock, flags);  		break; -	case BFAD_E_INIT_FAILED: +	case BFAD_E_HAL_INIT_FAILED:  		bfa_sm_set_state(bfad, bfad_sm_failed);  		break;  	default: @@ -310,13 +331,8 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)  		break;  	case BFAD_E_STOP: -		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) -			bfad_uncfg_pport(bfad); -		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) { -			bfad_im_probe_undo(bfad); -			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; -		} -		bfad_stop(bfad); +		bfa_sm_set_state(bfad, bfad_sm_fcs_exit); +		bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);  		break;  	case BFAD_E_EXIT_COMP: @@ -491,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)  	struct bfad_vport_s   *vport;  	int rc; -	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL); +	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);  	if (!vport) {  		bfa_trc(bfad, 0);  		return; @@ -766,49 +782,20 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)  	bfad->pcidev = pdev;  	/* Adjust PCIe Maximum Read Request Size */ -	if (pcie_max_read_reqsz > 0) { -		int pcie_cap_reg; -		u16 pcie_dev_ctl; -		u16 mask = 0xffff; - -		switch (pcie_max_read_reqsz) { -		case 128: -			mask = 0x0; -			break; -		case 256: -			mask = 0x1000; -			break; -		case 512: -			mask = 0x2000; -			break; -		case 1024: -			mask = 0x3000; -			break; -		case 2048: -			mask = 0x4000; -			break; -		case 4096: -			mask = 0x5000; -			break; -		default: -			break; -		} - -		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP); -		if (mask != 0xffff && pcie_cap_reg) { -			pcie_cap_reg += 0x08; -			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl); -			if ((pcie_dev_ctl & 0x7000) != mask) { -				printk(KERN_WARNING "BFA[%s]: " +	if (pci_is_pcie(pdev) && pcie_max_read_reqsz) { +		if (pcie_max_read_reqsz >= 128 && +		    pcie_max_read_reqsz <= 4096 && +		    is_power_of_2(pcie_max_read_reqsz)) { +			int max_rq = pcie_get_readrq(pdev); +			printk(KERN_WARNING "BFA[%s]: "  				"pcie_max_read_request_size is %d, " -				"reset to %d\n", bfad->pci_name, -				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7, +				"reset to %d\n", bfad->pci_name, max_rq,  				pcie_max_read_reqsz); - -				pcie_dev_ctl &= ~0x7000; -				pci_write_config_word(pdev, pcie_cap_reg, -						pcie_dev_ctl | mask); -			} +			pcie_set_readrq(pdev, pcie_max_read_reqsz); +		} else { +			printk(KERN_WARNING "BFA[%s]: invalid " +			       "pcie_max_read_request_size %d ignored\n", +			       bfad->pci_name, pcie_max_read_reqsz);  		}  	} @@ -833,7 +820,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)  	/* Disable PCIE Advanced Error Recovery (AER) */  	pci_disable_pcie_error_reporting(pdev);  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  }  bfa_status_t @@ -854,7 +840,7 @@ bfad_drv_init(struct bfad_s *bfad)  		printk(KERN_WARNING  			"Not enough memory to attach all Brocade HBA ports, %s",  			"System may need more memory.\n"); -		goto out_hal_mem_alloc_failure; +		return BFA_STATUS_FAILED;  	}  	bfad->bfa.trcmod = bfad->trcmod; @@ -871,31 +857,11 @@ bfad_drv_init(struct bfad_s *bfad)  	bfad->bfa_fcs.trcmod = bfad->trcmod;  	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);  	bfad->bfa_fcs.fdmi_enabled = fdmi_enable; -	bfa_fcs_init(&bfad->bfa_fcs);  	spin_unlock_irqrestore(&bfad->bfad_lock, flags);  	bfad->bfad_flags |= BFAD_DRV_INIT_DONE; -	/* configure base port */ -	rc = 
bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); -	if (rc != BFA_STATUS_OK) -		goto out_cfg_pport_fail; -  	return BFA_STATUS_OK; - -out_cfg_pport_fail: -	/* fcs exit - on cfg pport failure */ -	spin_lock_irqsave(&bfad->bfad_lock, flags); -	init_completion(&bfad->comp); -	bfad->pport.flags |= BFAD_PORT_DELETE; -	bfa_fcs_exit(&bfad->bfa_fcs); -	spin_unlock_irqrestore(&bfad->bfad_lock, flags); -	wait_for_completion(&bfad->comp); -	/* bfa detach - free hal memory */ -	bfa_detach(&bfad->bfa); -	bfad_hal_mem_release(bfad); -out_hal_mem_alloc_failure: -	return BFA_STATUS_FAILED;  }  void @@ -1039,13 +1005,19 @@ bfad_start_ops(struct bfad_s *bfad) {  	/* FCS driver info init */  	spin_lock_irqsave(&bfad->bfad_lock, flags);  	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info); + +	if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE) +		bfa_fcs_update_cfg(&bfad->bfa_fcs); +	else +		bfa_fcs_init(&bfad->bfa_fcs); +  	spin_unlock_irqrestore(&bfad->bfad_lock, flags); -	/* -	 * FCS update cfg - reset the pwwn/nwwn of fabric base logical port -	 * with values learned during bfa_init firmware GETATTR REQ. -	 */ -	bfa_fcs_update_cfg(&bfad->bfa_fcs); +	if (!(bfad->bfad_flags & BFAD_CFG_PPORT_DONE)) { +		retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); +		if (retval != BFA_STATUS_OK) +			return BFA_STATUS_FAILED; +	}  	/* Setup fc host fixed attribute if the lk supports */  	bfad_fc_host_init(bfad->pport.im_port); @@ -1056,10 +1028,6 @@ bfad_start_ops(struct bfad_s *bfad) {  		printk(KERN_WARNING "bfad_im_probe failed\n");  		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))  			bfa_sm_set_state(bfad, bfad_sm_failed); -		bfad_im_probe_undo(bfad); -		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE; -		bfad_uncfg_pport(bfad); -		bfad_stop(bfad);  		return BFA_STATUS_FAILED;  	} else  		bfad->bfad_flags |= BFAD_FC4_PROBE_DONE; @@ -1429,7 +1397,6 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)  	return 0;  out_bfad_sm_failure: -	bfa_detach(&bfad->bfa);  	bfad_hal_mem_release(bfad);  out_drv_init_failure:  	/* Remove the debugfs node for this bfad */ @@ -1564,7 +1531,7 @@ restart_bfa(struct bfad_s *bfad)  	if (bfad_setup_intr(bfad)) {  		dev_printk(KERN_WARNING, &pdev->dev,  			   "%s: bfad_setup_intr failed\n", bfad->pci_name); -		bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED); +		bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);  		return -1;  	} @@ -1832,7 +1799,7 @@ out:  static u32 *  bfad_load_fwimg(struct pci_dev *pdev)  { -	if (pdev->device == BFA_PCI_DEVICE_ID_CT2) { +	if (bfa_asic_id_ct2(pdev->device)) {  		if (bfi_image_ct2_size == 0)  			bfad_read_firmware(pdev, &bfi_image_ct2,  				&bfi_image_ct2_size, BFAD_FW_FILE_CT2); @@ -1842,12 +1809,14 @@ bfad_load_fwimg(struct pci_dev *pdev)  			bfad_read_firmware(pdev, &bfi_image_ct,  				&bfi_image_ct_size, BFAD_FW_FILE_CT);  		return bfi_image_ct; -	} else { +	} else if (bfa_asic_id_cb(pdev->device)) {  		if (bfi_image_cb_size == 0)  			bfad_read_firmware(pdev, &bfi_image_cb,  				&bfi_image_cb_size, BFAD_FW_FILE_CB);  		return bfi_image_cb;  	} + +	return NULL;  }  static void diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index e9a681d3122..40be670a1cb 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -593,11 +593,8 @@ bfad_im_vport_set_symbolic_name(struct fc_vport *fc_vport)  		return;  	spin_lock_irqsave(&bfad->bfad_lock, flags); -	if (strlen(sym_name) > 0) { -		strcpy(fcs_vport->lport.port_cfg.sym_name.symname, sym_name); -		bfa_fcs_lport_ns_util_send_rspn_id( -			
BFA_FCS_GET_NS_FROM_PORT((&fcs_vport->lport)), NULL); -	} +	if (strlen(sym_name) > 0) +		bfa_fcs_lport_set_symname(&fcs_vport->lport, sym_name);  	spin_unlock_irqrestore(&bfad->bfad_lock, flags);  } diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 0467c349251..8994fb857ee 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c @@ -229,6 +229,18 @@ bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)  }  int +bfad_iocmd_ioc_fw_sig_inv(struct bfad_s *bfad, void *cmd) +{ +	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd; +	unsigned long flags; + +	spin_lock_irqsave(&bfad->bfad_lock, flags); +	iocmd->status = bfa_ioc_fwsig_invalidate(&bfad->bfa.ioc); +	spin_unlock_irqrestore(&bfad->bfad_lock, flags); +	return 0; +} + +int  bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)  {  	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd; @@ -2292,8 +2304,10 @@ bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)  	spin_lock_irqsave(&bfad->bfad_lock, flags); -	if (bfa_fcport_is_dport(&bfad->bfa)) +	if (bfa_fcport_is_dport(&bfad->bfa)) { +		spin_unlock_irqrestore(&bfad->bfad_lock, flags);  		return BFA_STATUS_DPORT_ERR; +	}  	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||  		(fcport->topology == BFA_PORT_TOPOLOGY_LOOP)) @@ -2893,6 +2907,9 @@ bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,  	case IOCMD_IOC_PCIFN_CFG:  		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);  		break; +	case IOCMD_IOC_FW_SIG_INV: +		rc = bfad_iocmd_ioc_fw_sig_inv(bfad, iocmd); +		break;  	case IOCMD_PCIFN_CREATE:  		rc = bfad_iocmd_pcifn_create(bfad, iocmd);  		break; diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h index 05f0fc9cf06..90abef69158 100644 --- a/drivers/scsi/bfa/bfad_bsg.h +++ b/drivers/scsi/bfa/bfad_bsg.h @@ -34,6 +34,7 @@ enum {  	IOCMD_IOC_RESET_FWSTATS,  	IOCMD_IOC_SET_ADAPTER_NAME,  	IOCMD_IOC_SET_PORT_NAME, +	IOCMD_IOC_FW_SIG_INV,  	IOCMD_IOCFC_GET_ATTR,  	IOCMD_IOCFC_SET_INTR,  	IOCMD_PORT_ENABLE, diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 78d3401bc16..8b97877d42c 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -57,7 +57,7 @@  #ifdef BFA_DRIVER_VERSION  #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION  #else -#define BFAD_DRIVER_VERSION    "3.2.21.1" +#define BFAD_DRIVER_VERSION    "3.2.23.0"  #endif  #define BFAD_PROTO_NAME FCPI_NAME @@ -240,8 +240,8 @@ enum bfad_sm_event {  	BFAD_E_KTHREAD_CREATE_FAILED	= 2,  	BFAD_E_INIT			= 3,  	BFAD_E_INIT_SUCCESS		= 4, -	BFAD_E_INIT_FAILED		= 5, -	BFAD_E_INTR_INIT_FAILED		= 6, +	BFAD_E_HAL_INIT_FAILED		= 5, +	BFAD_E_INIT_FAILED		= 6,  	BFAD_E_FCS_EXIT_COMP		= 7,  	BFAD_E_EXIT_COMP		= 8,  	BFAD_E_STOP			= 9 diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 9967f9c1485..f067332bf76 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -73,9 +73,14 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,  		break; -	case BFI_IOIM_STS_ABORTED:  	case BFI_IOIM_STS_TIMEDOUT: +		host_status = DID_TIME_OUT; +		cmnd->result = ScsiResult(host_status, 0); +		break;  	case BFI_IOIM_STS_PATHTOV: +		host_status = DID_TRANSPORT_DISRUPTED; +		cmnd->result = ScsiResult(host_status, 0); +		break;  	default:  		host_status = DID_ERROR;  		cmnd->result = ScsiResult(host_status, 0); diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 37bd2564e83..9ef91f907de 100644 --- a/drivers/scsi/bfa/bfi.h +++ 
b/drivers/scsi/bfa/bfi.h
@@ -46,6 +47,7 @@
  */
 #define	BFI_FLASH_CHUNK_SZ			256	/*  Flash chunk size */
 #define	BFI_FLASH_CHUNK_SZ_WORDS	(BFI_FLASH_CHUNK_SZ/sizeof(u32))
+#define BFI_FLASH_IMAGE_SZ		0x100000
 
 /*
  * Msg header common to all msgs
  */
@@ -324,7 +325,29 @@ struct bfi_ioc_getattr_reply_s {
 
 #define BFI_IOC_TRC_ENTS	256
 
 #define BFI_IOC_FW_SIGNATURE	(0xbfadbfad)
+#define BFA_IOC_FW_INV_SIGN	(0xdeaddead)
 #define BFI_IOC_MD5SUM_SZ	4
+
+struct bfi_ioc_fwver_s {
+#ifdef __BIG_ENDIAN
+	uint8_t patch;
+	uint8_t maint;
+	uint8_t minor;
+	uint8_t major;
+	uint8_t rsvd[2];
+	uint8_t build;
+	uint8_t phase;
+#else
+	uint8_t major;
+	uint8_t minor;
+	uint8_t maint;
+	uint8_t patch;
+	uint8_t phase;
+	uint8_t build;
+	uint8_t rsvd[2];
+#endif
+};
+
 struct bfi_ioc_image_hdr_s {
 	u32	signature;	/* constant signature		*/
 	u8	asic_gen;	/* asic generation		*/
@@ -333,10 +356,18 @@ struct bfi_ioc_image_hdr_s {
 	u8	port1_mode;	/* device mode for port 1	*/
 	u32	exec;		/* exec vector			*/
 	u32	bootenv;	/* firmware boot env		*/
-	u32	rsvd_b[4];
+	u32	rsvd_b[2];
+	struct bfi_ioc_fwver_s	fwver;
 	u32	md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
+enum bfi_ioc_img_ver_cmp_e {
+	BFI_IOC_IMG_VER_INCOMP,
+	BFI_IOC_IMG_VER_OLD,
+	BFI_IOC_IMG_VER_SAME,
+	BFI_IOC_IMG_VER_BETTER
+};
+
 #define BFI_FWBOOT_DEVMODE_OFF		4
 #define BFI_FWBOOT_TYPE_OFF		8
 #define BFI_FWBOOT_ENV_OFF		12
@@ -346,6 +377,12 @@ struct bfi_ioc_image_hdr_s {
 	 ((u32)(__p0_mode)) << 8 |		\
 	 ((u32)(__p1_mode)))
 
+enum bfi_fwboot_type {
+	BFI_FWBOOT_TYPE_NORMAL  = 0,
+	BFI_FWBOOT_TYPE_FLASH   = 1,
+	BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
 #define BFI_FWBOOT_TYPE_NORMAL	0
 #define BFI_FWBOOT_TYPE_MEMTEST	2
 #define BFI_FWBOOT_ENV_OS       0
@@ -1107,7 +1144,8 @@ struct bfi_diag_dport_scn_teststart_s {
 	wwn_t	pwwn;	/* switch port wwn. 8 bytes */
 	wwn_t	nwwn;	/* switch node wwn. 
8 bytes */  	u8	type;	/* bfa_diag_dport_test_type_e */ -	u8	rsvd[3]; +	u8	mode;	/* bfa_diag_dport_test_opmode */ +	u8	rsvd[2];  	u32	numfrm; /* from switch uint in 1M */  }; diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index d7ca9305ff4..6a976657b47 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -64,7 +64,7 @@  #include "bnx2fc_constants.h"  #define BNX2FC_NAME		"bnx2fc" -#define BNX2FC_VERSION		"1.0.14" +#define BNX2FC_VERSION		"2.4.2"  #define PFX			"bnx2fc: " @@ -367,6 +367,7 @@ struct bnx2fc_rport {  	atomic_t num_active_ios;  	u32 flush_in_prog;  	unsigned long timestamp; +	unsigned long retry_delay_timestamp;  	struct list_head free_task_list;  	struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1];  	struct list_head active_cmd_queue; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 69ac55495c1..785d0d71781 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);  #define DRV_MODULE_NAME		"bnx2fc"  #define DRV_MODULE_VERSION	BNX2FC_VERSION -#define DRV_MODULE_RELDATE	"Mar 08, 2013" +#define DRV_MODULE_RELDATE	"Dec 11, 2013"  static char version[] = @@ -464,7 +464,7 @@ static int bnx2fc_l2_rcv_thread(void *arg)  	struct fcoe_percpu_s *bg = arg;  	struct sk_buff *skb; -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  	set_current_state(TASK_INTERRUPTIBLE);  	while (!kthread_should_stop()) {  		schedule(); @@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)  	skb_pull(skb, sizeof(struct fcoe_hdr));  	fr_len = skb->len - sizeof(struct fcoe_crc_eof); -	stats = per_cpu_ptr(lport->stats, get_cpu()); -	stats->RxFrames++; -	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; -  	fp = (struct fc_frame *)skb;  	fc_frame_init(fp);  	fr_dev(fp) = lport;  	fr_sof(fp) = hp->fcoe_sof;  	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { -		put_cpu();  		kfree_skb(skb);  		return;  	}  	fr_eof(fp) = crc_eof.fcoe_eof;  	fr_crc(fp) = crc_eof.fcoe_crc32;  	if (pskb_trim(skb, fr_len)) { -		put_cpu();  		kfree_skb(skb);  		return;  	} @@ -542,10 +536,8 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)  	vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));  	if (vn_port) {  		port = lport_priv(vn_port); -		if (compare_ether_addr(port->data_src_addr, dest_mac) -		    != 0) { +		if (!ether_addr_equal(port->data_src_addr, dest_mac)) {  			BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); -			put_cpu();  			kfree_skb(skb);  			return;  		} @@ -553,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)  	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&  	    fh->fh_type == FC_TYPE_FCP) {  		/* Drop FCP data. 
We don't do this in the L2 path */
-		put_cpu();
 		kfree_skb(skb);
 		return;
 	}
@@ -563,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 		case ELS_LOGO:
 			if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
 				/* drop non-FIP LOGO */
-				put_cpu();
 				kfree_skb(skb);
 				return;
 			}
@@ -573,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 
 	if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
 		/* Drop incoming ABTS */
-		put_cpu();
 		kfree_skb(skb);
 		return;
 	}
 
+	stats = per_cpu_ptr(lport->stats, smp_processor_id());
+	stats->RxFrames++;
+	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
 	if (le32_to_cpu(fr_crc(fp)) !=
 			~crc32(~0, skb->data, fr_len)) {
 		if (stats->InvalidCRCCount < 5)
 			printk(KERN_WARNING PFX "dropping frame with "
 			       "CRC error\n");
 		stats->InvalidCRCCount++;
-		put_cpu();
 		kfree_skb(skb);
 		return;
 	}
-	put_cpu();
 	fc_exch_recv(lport, fp);
 }
 
@@ -603,7 +594,7 @@ int bnx2fc_percpu_io_thread(void *arg)
 	struct bnx2fc_work *work, *tmp;
 	LIST_HEAD(work_list);
 
-	set_user_nice(current, -20);
+	set_user_nice(current, MIN_NICE);
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
 		schedule();
@@ -851,6 +842,9 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
 				__bnx2fc_destroy(interface);
 		}
 		mutex_unlock(&bnx2fc_dev_lock);
+
+		/* Ensure ALL destroy work has been completed before return */
+		flush_workqueue(bnx2fc_wq);
 		return;
 
 	default:
@@ -1381,6 +1375,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
 		return NULL;
 	}
 	ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+	ctlr->cdev = ctlr_dev;
 	interface = fcoe_ctlr_priv(ctlr);
 	dev_hold(netdev);
 	kref_init(&interface->kref);
@@ -2004,6 +1999,24 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
 		set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
 }
 
+/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */
+static int __bnx2fc_disable(struct fcoe_ctlr *ctlr)
+{
+	struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+	if (interface->enabled == true) {
+		if (!ctlr->lp) {
+			pr_err(PFX "__bnx2fc_disable: lport not found\n");
+			return -ENODEV;
+		} else {
+			interface->enabled = false;
+			fcoe_ctlr_link_down(ctlr);
+			fcoe_clean_pending_queue(ctlr->lp);
+		}
+	}
+	return 0;
+}
+
 /**
  * Deprecated: Use bnx2fc_enabled()
  */
@@ -2018,20 +2031,34 @@ static int bnx2fc_disable(struct net_device *netdev)
 
 	interface = bnx2fc_interface_lookup(netdev);
 	ctlr = bnx2fc_to_ctlr(interface);
-	if (!interface || !ctlr->lp) {
+
+	if (!interface) {
 		rc = -ENODEV;
-		printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
+		pr_err(PFX "bnx2fc_disable: interface not found\n");
 	} else {
-		interface->enabled = false;
-		fcoe_ctlr_link_down(ctlr);
-		fcoe_clean_pending_queue(ctlr->lp);
+		rc = __bnx2fc_disable(ctlr);
 	}
-
 	mutex_unlock(&bnx2fc_dev_lock);
 	rtnl_unlock();
 	return rc;
 }
 
+static int __bnx2fc_enable(struct fcoe_ctlr *ctlr)
+{
+	struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr);
+
+	if (interface->enabled == false) {
+		if (!ctlr->lp) {
+			pr_err(PFX "__bnx2fc_enable: lport not found\n");
+			return -ENODEV;
+		} else if (!bnx2fc_link_ok(ctlr->lp)) {
+			fcoe_ctlr_link_up(ctlr);
+			interface->enabled = true;
+		}
+	}
+	return 0;
+}
+
 /**
  * Deprecated: Use bnx2fc_enabled()
  */
@@ -2046,12 +2073,11 @@ static int bnx2fc_enable(struct net_device *netdev)
 
 	interface = bnx2fc_interface_lookup(netdev);
 	ctlr = bnx2fc_to_ctlr(interface);
-	if 
(!interface || !ctlr->lp) { +	if (!interface) {  		rc = -ENODEV; -		printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); -	} else if (!bnx2fc_link_ok(ctlr->lp)) { -		fcoe_ctlr_link_up(ctlr); -		interface->enabled = true; +		pr_err(PFX "bnx2fc_enable: interface not found\n"); +	} else { +		rc = __bnx2fc_enable(ctlr);  	}  	mutex_unlock(&bnx2fc_dev_lock); @@ -2072,14 +2098,12 @@ static int bnx2fc_enable(struct net_device *netdev)  static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)  {  	struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); -	struct fc_lport *lport = ctlr->lp; -	struct net_device *netdev = bnx2fc_netdev(lport);  	switch (cdev->enabled) {  	case FCOE_CTLR_ENABLED: -		return bnx2fc_enable(netdev); +		return __bnx2fc_enable(ctlr);  	case FCOE_CTLR_DISABLED: -		return bnx2fc_disable(netdev); +		return __bnx2fc_disable(ctlr);  	case FCOE_CTLR_UNUSED:  	default:  		return -ENOTSUPP; @@ -2360,6 +2384,9 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)  			__bnx2fc_destroy(interface);  	mutex_unlock(&bnx2fc_dev_lock); +	/* Ensure ALL destroy work has been completed before return */ +	flush_workqueue(bnx2fc_wq); +  	bnx2fc_ulp_stop(hba);  	/* unregister cnic device */  	if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) @@ -2557,12 +2584,16 @@ static int __init bnx2fc_mod_init(void)  		spin_lock_init(&p->fp_work_lock);  	} +	cpu_notifier_register_begin(); +  	for_each_online_cpu(cpu) {  		bnx2fc_percpu_thread_create(cpu);  	}  	/* Initialize per CPU interrupt thread */ -	register_hotcpu_notifier(&bnx2fc_cpu_notifier); +	__register_hotcpu_notifier(&bnx2fc_cpu_notifier); + +	cpu_notifier_register_done();  	cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); @@ -2627,13 +2658,17 @@ static void __exit bnx2fc_mod_exit(void)  	if (l2_thread)  		kthread_stop(l2_thread); -	unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); +	cpu_notifier_register_begin();  	/* Destroy per cpu threads */  	for_each_online_cpu(cpu) {  		bnx2fc_percpu_thread_destroy(cpu);  	} +	__unregister_hotcpu_notifier(&bnx2fc_cpu_notifier); + +	cpu_notifier_register_done(); +  	destroy_workqueue(bnx2fc_wq);  	/*  	 * detach from scsi transport diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 46a37657307..512aed3ae4f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1966,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)  {  	int i;  	int segment_count; -	int hash_table_size;  	u32 *pbl; -	segment_count = hba->hash_tbl_segment_count; -	hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * -		sizeof(struct fcoe_hash_table_entry); +	if (hba->hash_tbl_segments) { -	pbl = hba->hash_tbl_pbl; -	for (i = 0; i < segment_count; ++i) { -		dma_addr_t dma_address; +		pbl = hba->hash_tbl_pbl; +		if (pbl) { +			segment_count = hba->hash_tbl_segment_count; +			for (i = 0; i < segment_count; ++i) { +				dma_addr_t dma_address; -		dma_address = le32_to_cpu(*pbl); -		++pbl; -		dma_address += ((u64)le32_to_cpu(*pbl)) << 32; -		++pbl; -		dma_free_coherent(&hba->pcidev->dev, -				  BNX2FC_HASH_TBL_CHUNK_SIZE, -				  hba->hash_tbl_segments[i], -				  dma_address); +				dma_address = le32_to_cpu(*pbl); +				++pbl; +				dma_address += ((u64)le32_to_cpu(*pbl)) << 32; +				++pbl; +				dma_free_coherent(&hba->pcidev->dev, +						  BNX2FC_HASH_TBL_CHUNK_SIZE, +						  hba->hash_tbl_segments[i], +						  dma_address); +			} +		} +		kfree(hba->hash_tbl_segments); +		hba->hash_tbl_segments = 
NULL;  	}  	if (hba->hash_tbl_pbl) { @@ -2023,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)  	dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);  	if (!dma_segment_array) {  		printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); -		return -ENOMEM; +		goto cleanup_ht;  	}  	for (i = 0; i < segment_count; ++i) { @@ -2034,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)  					   GFP_KERNEL);  		if (!hba->hash_tbl_segments[i]) {  			printk(KERN_ERR PFX "hash segment alloc failed\n"); -			while (--i >= 0) { -				dma_free_coherent(&hba->pcidev->dev, -						    BNX2FC_HASH_TBL_CHUNK_SIZE, -						    hba->hash_tbl_segments[i], -						    dma_segment_array[i]); -				hba->hash_tbl_segments[i] = NULL; -			} -			kfree(dma_segment_array); -			return -ENOMEM; +			goto cleanup_dma;  		}  		memset(hba->hash_tbl_segments[i], 0,  		       BNX2FC_HASH_TBL_CHUNK_SIZE); @@ -2054,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)  					       GFP_KERNEL);  	if (!hba->hash_tbl_pbl) {  		printk(KERN_ERR PFX "hash table pbl alloc failed\n"); -		kfree(dma_segment_array); -		return -ENOMEM; +		goto cleanup_dma;  	}  	memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); @@ -2080,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)  	}  	kfree(dma_segment_array);  	return 0; + +cleanup_dma: +	for (i = 0; i < segment_count; ++i) { +		if (hba->hash_tbl_segments[i]) +			dma_free_coherent(&hba->pcidev->dev, +					    BNX2FC_HASH_TBL_CHUNK_SIZE, +					    hba->hash_tbl_segments[i], +					    dma_segment_array[i]); +	} + +	kfree(dma_segment_array); + +cleanup_ht: +	kfree(hba->hash_tbl_segments); +	hba->hash_tbl_segments = NULL; +	return -ENOMEM;  }  /** diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 575142e92d9..7bc47fc7c68 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)  				       arr_sz, GFP_KERNEL);  	if (!cmgr->free_list_lock) {  		printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); +		kfree(cmgr->free_list); +		cmgr->free_list = NULL;  		goto mem_err;  	} @@ -594,13 +596,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)  		mp_req->mp_resp_bd = NULL;  	}  	if (mp_req->req_buf) { -		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				     mp_req->req_buf,  				     mp_req->req_buf_dma);  		mp_req->req_buf = NULL;  	}  	if (mp_req->resp_buf) { -		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				     mp_req->resp_buf,  				     mp_req->resp_buf_dma);  		mp_req->resp_buf = NULL; @@ -622,7 +624,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)  	mp_req->req_len = sizeof(struct fcp_cmnd);  	io_req->data_xfer_len = mp_req->req_len; -	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, +	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  					     &mp_req->req_buf_dma,  					     GFP_ATOMIC);  	if (!mp_req->req_buf) { @@ -631,7 +633,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)  		return FAILED;  	} -	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, +	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  					      &mp_req->resp_buf_dma,  					      GFP_ATOMIC);  	if (!mp_req->resp_buf) { @@ -639,8 +641,8 @@ int 
bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)  		bnx2fc_free_mp_resc(io_req);  		return FAILED;  	} -	memset(mp_req->req_buf, 0, PAGE_SIZE); -	memset(mp_req->resp_buf, 0, PAGE_SIZE); +	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); +	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);  	/* Allocate and map mp_req_bd and mp_resp_bd */  	sz = sizeof(struct fcoe_bd_ctx); @@ -665,7 +667,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)  	mp_req_bd = mp_req->mp_req_bd;  	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;  	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); -	mp_req_bd->buf_len = PAGE_SIZE; +	mp_req_bd->buf_len = CNIC_PAGE_SIZE;  	mp_req_bd->flags = 0;  	/* @@ -677,7 +679,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)  	addr = mp_req->resp_buf_dma;  	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;  	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); -	mp_resp_bd->buf_len = PAGE_SIZE; +	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;  	mp_resp_bd->flags = 0;  	return SUCCESS; @@ -1246,6 +1248,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)  			kref_put(&io_req->refcount,  				 bnx2fc_cmd_release); /* drop timer hold */  		rc = bnx2fc_expl_logo(lport, io_req); +		/* This only occurs when a task abort was requested while ABTS +		   is in progress.  Setting the IO_CLEANUP flag will skip the +		   RRQ process in the case when the fw generated SCSI_CMD cmpl +		   was a result from the ABTS request rather than the CLEANUP +		   request */ +		set_bit(BNX2FC_FLAG_IO_CLEANUP,	&io_req->req_flags);  		goto out;  	} @@ -1865,7 +1873,15 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,  		rc = SCSI_MLQUEUE_TARGET_BUSY;  		goto exit_qcmd;  	} - +	if (tgt->retry_delay_timestamp) { +		if (time_after(jiffies, tgt->retry_delay_timestamp)) { +			tgt->retry_delay_timestamp = 0; +		} else { +			/* If retry_delay timer is active, flow off the ML */ +			rc = SCSI_MLQUEUE_TARGET_BUSY; +			goto exit_qcmd; +		} +	}  	io_req = bnx2fc_cmd_alloc(tgt);  	if (!io_req) {  		rc = SCSI_MLQUEUE_HOST_BUSY; @@ -1955,6 +1971,15 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,  				 " fcp_resid = 0x%x\n",  				io_req->cdb_status, io_req->fcp_resid);  			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + +			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || +			    io_req->cdb_status == SAM_STAT_BUSY) { +				/* Set the jiffies + retry_delay_timer * 100ms +				   for the rport/tgt */ +				tgt->retry_delay_timestamp = jiffies + +					fcp_rsp->retry_delay_timer * HZ / 10; +			} +  		}  		if (io_req->fcp_resid)  			scsi_set_resid(sc_cmd, io_req->fcp_resid); 
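
The queuecommand and completion hunks above implement the FCP retry-delay hint: retry_delay_timer in the FCP_RSP is in 100 ms units, so on a TASK SET FULL or BUSY status the completion path records jiffies + timer * HZ / 10, and queuecommand flows I/O off with SCSI_MLQUEUE_TARGET_BUSY until that stamp passes. The arithmetic, sketched with illustrative helper names:

#include <linux/jiffies.h>
#include <linux/types.h>

/* record when the target may be retried; timer is in 100 ms units */
static void note_retry_delay(unsigned long *stamp, u16 retry_delay_timer)
{
	*stamp = jiffies + (unsigned long)retry_delay_timer * HZ / 10;
}

/* true while new commands should be returned as TARGET_BUSY */
static bool target_flowed_off(unsigned long *stamp)
{
	if (*stamp && !time_after(jiffies, *stamp))
		return true;
	*stamp = 0;	/* window expired (or never set): resume I/O */
	return false;
}

diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index 4d93177dfb5..6870cf6781d 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -386,6 +386,7 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,  	tgt->rq_prod_idx = 0x8000;  	tgt->rq_cons_idx = 0;  	atomic_set(&tgt->num_active_ios, 0); +	tgt->retry_delay_timestamp = 0;  	if (rdata->flags & FC_RP_FLAGS_RETRY &&  	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && @@ -673,7 +674,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	/* Allocate and map SQ */  	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; -	tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; +	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & +			   CNIC_PAGE_MASK;  	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,  				     &tgt->sq_dma, GFP_KERNEL); @@ -686,7 +688,8 @@ static int 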
bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	/* Allocate and map CQ */  	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; -	tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; +	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & +			   CNIC_PAGE_MASK;  	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,  				     &tgt->cq_dma, GFP_KERNEL); @@ -699,7 +702,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	/* Allocate and map RQ and RQ PBL */  	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; -	tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; +	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & +			   CNIC_PAGE_MASK;  	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,  					&tgt->rq_dma, GFP_KERNEL); @@ -710,8 +714,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	}  	memset(tgt->rq, 0, tgt->rq_mem_size); -	tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *); -	tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; +	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); +	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & +			   CNIC_PAGE_MASK;  	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,  					 &tgt->rq_pbl_dma, GFP_KERNEL); @@ -722,7 +727,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	}  	memset(tgt->rq_pbl, 0, tgt->rq_pbl_size); -	num_pages = tgt->rq_mem_size / PAGE_SIZE; +	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;  	page = tgt->rq_dma;  	pbl = (u32 *)tgt->rq_pbl; @@ -731,13 +736,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  		pbl++;  		*pbl = (u32)((u64)page >> 32);  		pbl++; -		page += PAGE_SIZE; +		page += CNIC_PAGE_SIZE;  	}  	/* Allocate and map XFERQ */  	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; -	tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) & -			       PAGE_MASK; +	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & +			       CNIC_PAGE_MASK;  	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,  					&tgt->xferq_dma, GFP_KERNEL); @@ -750,8 +755,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	/* Allocate and map CONFQ & CONFQ PBL */  	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; -	tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) & -			       PAGE_MASK; +	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & +			       CNIC_PAGE_MASK;  	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,  					&tgt->confq_dma, GFP_KERNEL); @@ -763,9 +768,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	memset(tgt->confq, 0, tgt->confq_mem_size);  	tgt->confq_pbl_size = -		(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *); +		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);  	tgt->confq_pbl_size = -		(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; +		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;  	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,  					    tgt->confq_pbl_size, @@ -777,7 +782,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	}  	memset(tgt->confq_pbl, 0, tgt->confq_pbl_size); -	num_pages = tgt->confq_mem_size / PAGE_SIZE; +	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;  	page = tgt->confq_dma;  	pbl = (u32 *)tgt->confq_pbl; @@ -786,7 +791,7 @@ static int 
bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  		pbl++;  		*pbl = (u32)((u64)page >> 32);  		pbl++; -		page += PAGE_SIZE; +		page += CNIC_PAGE_SIZE;  	}  	/* Allocate and map ConnDB */ @@ -805,8 +810,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,  	/* Allocate and map LCQ */  	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; -	tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) & -			     PAGE_MASK; +	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & +			     CNIC_PAGE_MASK;  	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,  				      &tgt->lcq_dma, GFP_KERNEL); 
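
All of the PAGE_SIZE to CNIC_PAGE_SIZE conversions in these files size queue memory by the chip's fixed page rather than the host's PAGE_SIZE, which can be 64 KiB on some architectures; CNIC_PAGE_SIZE and CNIC_PAGE_MASK are assumed here to come from the cnic headers. The recurring round-up-and-fill-PBL idiom, sketched with illustrative helpers:

/* round a queue size up to whole chip pages */
static size_t cnic_page_round_up(size_t sz)
{
	return (sz + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
}

/* write one {lo32, hi32} page-table entry per chip page of the region */
static void fill_pbl(u32 *pbl, dma_addr_t base, size_t mem_size)
{
	size_t n = mem_size / CNIC_PAGE_SIZE;

	while (n--) {
		*pbl++ = (u32)base;
		*pbl++ = (u32)((u64)base >> 32);
		base += CNIC_PAGE_SIZE;
	}
}

diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 5be718c241c..d6d491c2f00 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)  	 * yield integral num of page buffers  	 */  	/* adjust SQ */ -	num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; +	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;  	if (hba->max_sqes < num_elements_per_pg)  		hba->max_sqes = num_elements_per_pg;  	else if (hba->max_sqes % num_elements_per_pg) @@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)  				 ~(num_elements_per_pg - 1);  	/* adjust CQ */ -	num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE; +	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;  	if (hba->max_cqes < num_elements_per_pg)  		hba->max_cqes = num_elements_per_pg;  	else if (hba->max_cqes % num_elements_per_pg) @@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)  				 ~(num_elements_per_pg - 1);  	/* adjust RQ */ -	num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE; +	num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;  	if (hba->max_rqes < num_elements_per_pg)  		hba->max_rqes = num_elements_per_pg;  	else if (hba->max_rqes % num_elements_per_pg) @@ -126,7 +126,7 @@ static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)  /**   * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification - * @ep:		endpoint (transport indentifier) structure + * @ep:		endpoint (transport identifier) structure   * @action:	action, ARM or DISARM. 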
For now only ARM_CQE is used   *   * Arm'ing CQ will enable chip to generate global EQ events inorder to interrupt @@ -756,7 +756,7 @@ void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)  /**   * bnx2i_send_conn_destroy - initiates iscsi connection teardown process   * @hba:	adapter structure pointer - * @ep:		endpoint (transport indentifier) structure + * @ep:		endpoint (transport identifier) structure   *   * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE to initiate   * 	iscsi connection context clean-up process @@ -791,7 +791,7 @@ int bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)  /**   * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process   * @hba: 		adapter structure pointer - * @ep: 		endpoint (transport indentifier) structure + * @ep: 		endpoint (transport identifier) structure   *   * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE   */ @@ -851,7 +851,7 @@ static int bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,  /**   * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation   * @hba: 		adapter structure pointer - * @ep: 		endpoint (transport indentifier) structure + * @ep: 		endpoint (transport identifier) structure   *   * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE   */ @@ -920,7 +920,7 @@ static int bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,   * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process   *   * @hba: 		adapter structure pointer - * @ep: 		endpoint (transport indentifier) structure + * @ep: 		endpoint (transport identifier) structure   *   * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE   */ @@ -939,7 +939,7 @@ int bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)  /**   * setup_qp_page_tables - iscsi QP page table setup function - * @ep:		endpoint (transport indentifier) structure + * @ep:		endpoint (transport identifier) structure   *   * Sets up page tables for SQ/RQ/CQ, 1G/sec (5706/5708/5709) devices requires   * 	64-bit address in big endian format. 
Whereas 10G/sec (57710) requires @@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  	/* SQ page table */  	memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size); -	num_pages = ep->qp.sq_mem_size / PAGE_SIZE; +	num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;  	page = ep->qp.sq_phys;  	if (cnic_dev_10g) @@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  			ptbl++;  			*ptbl = (u32) ((u64) page >> 32);  			ptbl++; -			page += PAGE_SIZE; +			page += CNIC_PAGE_SIZE;  		} else {  			/* PTE is written in big endian format for  			 * 5706/5708/5709 devices */ @@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  			ptbl++;  			*ptbl = (u32) page;  			ptbl++; -			page += PAGE_SIZE; +			page += CNIC_PAGE_SIZE;  		}  	}  	/* RQ page table */  	memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size); -	num_pages = ep->qp.rq_mem_size / PAGE_SIZE; +	num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;  	page = ep->qp.rq_phys;  	if (cnic_dev_10g) @@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  			ptbl++;  			*ptbl = (u32) ((u64) page >> 32);  			ptbl++; -			page += PAGE_SIZE; +			page += CNIC_PAGE_SIZE;  		} else {  			/* PTE is written in big endian format for  			 * 5706/5708/5709 devices */ @@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  			ptbl++;  			*ptbl = (u32) page;  			ptbl++; -			page += PAGE_SIZE; +			page += CNIC_PAGE_SIZE;  		}  	}  	/* CQ page table */  	memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size); -	num_pages = ep->qp.cq_mem_size / PAGE_SIZE; +	num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;  	page = ep->qp.cq_phys;  	if (cnic_dev_10g) @@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  			ptbl++;  			*ptbl = (u32) ((u64) page >> 32);  			ptbl++; -			page += PAGE_SIZE; +			page += CNIC_PAGE_SIZE;  		} else {  			/* PTE is written in big endian format for  			 * 5706/5708/5709 devices */ @@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  			ptbl++;  			*ptbl = (u32) page;  			ptbl++; -			page += PAGE_SIZE; +			page += CNIC_PAGE_SIZE;  		}  	}  } @@ -1046,7 +1046,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)  /**   * bnx2i_alloc_qp_resc - allocates required resources for QP.   * @hba:	adapter structure pointer - * @ep:		endpoint (transport indentifier) structure + * @ep:		endpoint (transport identifier) structure   *   * Allocate QP (transport layer for iSCSI connection) resources, DMA'able   *	memory for SQ/RQ/CQ and page tables. 
EP structure elements such @@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)  	/* Allocate page table memory for SQ which is page aligned */  	ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;  	ep->qp.sq_mem_size = -		(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; +		(ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;  	ep->qp.sq_pgtbl_size = -		(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *); +		(ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);  	ep->qp.sq_pgtbl_size = -		(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; +		(ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;  	ep->qp.sq_pgtbl_virt =  		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size, @@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)  	/* Allocate page table memory for CQ which is page aligned */  	ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;  	ep->qp.cq_mem_size = -		(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; +		(ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;  	ep->qp.cq_pgtbl_size = -		(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *); +		(ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);  	ep->qp.cq_pgtbl_size = -		(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; +		(ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;  	ep->qp.cq_pgtbl_virt =  		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size, @@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)  	/* Allocate page table memory for RQ which is page aligned */  	ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;  	ep->qp.rq_mem_size = -		(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK; +		(ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;  	ep->qp.rq_pgtbl_size = -		(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *); +		(ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);  	ep->qp.rq_pgtbl_size = -		(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK; +		(ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;  	ep->qp.rq_pgtbl_virt =  		dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size, @@ -1191,7 +1191,7 @@ mem_alloc_err:  /**   * bnx2i_free_qp_resc - free memory resources held by QP   * @hba:	adapter structure pointer - * @ep:	endpoint (transport indentifier) structure + * @ep:	endpoint (transport identifier) structure   *   * Free QP resources - SQ/RQ/CQ memory and page tables.   
*/ @@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)  	bnx2i_adjust_qp_size(hba);  	iscsi_init.flags = -		ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT; +		(CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;  	if (en_tcp_dack)  		iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;  	iscsi_init.reserved0 = 0; @@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)  			((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));  	iscsi_init.num_ccells_per_conn = hba->num_ccell;  	iscsi_init.num_tasks_per_conn = hba->max_sqes; -	iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE; +	iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;  	iscsi_init.sq_num_wqes = hba->max_sqes;  	iscsi_init.cq_log_wqes_per_page = -		(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE); +		(u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);  	iscsi_init.cq_num_wqes = hba->max_cqes;  	iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE + -				   (PAGE_SIZE - 1)) / PAGE_SIZE; +				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;  	iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE + -				   (PAGE_SIZE - 1)) / PAGE_SIZE; +				   (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;  	iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;  	iscsi_init.rq_num_wqes = hba->max_rqes; @@ -1361,7 +1361,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,  	u32 datalen = 0;  	resp_cqe = (struct bnx2i_cmd_response *)cqe; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->back_lock);  	task = iscsi_itt_to_task(conn,  				 resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);  	if (!task) @@ -1432,7 +1432,7 @@ done:  	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,  			     conn->data, datalen);  fail: -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->back_lock);  	return 0;  } @@ -1457,7 +1457,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,  	int pad_len;  	login = (struct bnx2i_login_response *) cqe; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	task = iscsi_itt_to_task(conn,  				 login->itt & ISCSI_LOGIN_RESPONSE_INDEX);  	if (!task) @@ -1500,7 +1500,7 @@ static int bnx2i_process_login_resp(struct iscsi_session *session,  		bnx2i_conn->gen_pdu.resp_buf,  		bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);  done: -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  	return 0;  } @@ -1525,7 +1525,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,  	int pad_len;  	text = (struct bnx2i_text_response *) cqe; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	task = iscsi_itt_to_task(conn, text->itt & ISCSI_LOGIN_RESPONSE_INDEX);  	if (!task)  		goto done; @@ -1561,7 +1561,7 @@ static int bnx2i_process_text_resp(struct iscsi_session *session,  			     bnx2i_conn->gen_pdu.resp_wr_ptr -  			     bnx2i_conn->gen_pdu.resp_buf);  done: -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  	return 0;  } @@ -1584,7 +1584,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,  	struct iscsi_tm_rsp *resp_hdr;  	tmf_cqe = (struct bnx2i_tmf_response *)cqe; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	task = iscsi_itt_to_task(conn,  				 tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);  	if (!task) @@ -1600,7 +1600,7 @@ static int bnx2i_process_tmf_resp(struct iscsi_session *session,  	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);  done: -	spin_unlock(&session->lock); +	
spin_unlock(&session->back_lock);  	return 0;  } @@ -1623,7 +1623,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,  	struct iscsi_logout_rsp *resp_hdr;  	logout = (struct bnx2i_logout_response *) cqe; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	task = iscsi_itt_to_task(conn,  				 logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);  	if (!task) @@ -1647,7 +1647,7 @@ static int bnx2i_process_logout_resp(struct iscsi_session *session,  	bnx2i_conn->ep->state = EP_STATE_LOGOUT_RESP_RCVD;  done: -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  	return 0;  } @@ -1668,12 +1668,12 @@ static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,  	struct iscsi_task *task;  	nop_in = (struct bnx2i_nop_in_msg *)cqe; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	task = iscsi_itt_to_task(conn,  				 nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);  	if (task)  		__iscsi_put_task(task); -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  }  /** @@ -1712,7 +1712,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,  	nop_in = (struct bnx2i_nop_in_msg *)cqe; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;  	memset(hdr, 0, sizeof(struct iscsi_hdr));  	hdr->opcode = nop_in->op_code; @@ -1738,7 +1738,7 @@ static int bnx2i_process_nopin_mesg(struct iscsi_session *session,  	}  done:  	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  	return tgt_async_nop;  } @@ -1771,7 +1771,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,  		return;  	} -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;  	memset(resp_hdr, 0, sizeof(struct iscsi_hdr));  	resp_hdr->opcode = async_cqe->op_code; @@ -1790,7 +1790,7 @@ static void bnx2i_process_async_mesg(struct iscsi_session *session,  	__iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,  			     (struct iscsi_hdr *)resp_hdr, NULL, 0); -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  } @@ -1817,7 +1817,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,  	} else  		bnx2i_unsol_pdu_adjust_rq(bnx2i_conn); -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;  	memset(hdr, 0, sizeof(struct iscsi_hdr));  	hdr->opcode = reject->op_code; @@ -1828,7 +1828,7 @@ static void bnx2i_process_reject_mesg(struct iscsi_session *session,  	hdr->ffffffff = cpu_to_be32(RESERVED_ITT);  	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,  			     reject->data_length); -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  }  /** @@ -1848,13 +1848,13 @@ static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,  	struct iscsi_task *task;  	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	task = iscsi_itt_to_task(conn,  			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);  	if (!task)  		printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",  			cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX); -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  	complete(&bnx2i_conn->cmd_cleanup_cmpl);  } @@ -1870,7 +1870,7 @@ int bnx2i_percpu_io_thread(void *arg)  	struct bnx2i_work *work, *tmp;  	
LIST_HEAD(work_list); -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  	while (!kthread_should_stop()) {  		spin_lock_bh(&p->p_work_lock); @@ -1921,11 +1921,11 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,  	int rc = 0;  	int cpu; -	spin_lock(&session->lock); +	spin_lock(&session->back_lock);  	task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,  				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);  	if (!task || !task->sc) { -		spin_unlock(&session->lock); +		spin_unlock(&session->back_lock);  		return -EINVAL;  	}  	sc = task->sc; @@ -1935,7 +1935,7 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,  	else  		cpu = sc->request->cpu; -	spin_unlock(&session->lock); +	spin_unlock(&session->back_lock);  	p = &per_cpu(bnx2i_percpu, cpu);  	spin_lock(&p->p_work_lock); 
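
The session->lock conversions above follow libiscsi's split of the old single session lock into frwd_lock (submission path) and back_lock (completion path), so completion processing no longer serializes against command queueing. Completion-side usage, sketched with an illustrative function name:

#include <scsi/libiscsi.h>

static void complete_task_by_itt(struct iscsi_session *session,
				 struct iscsi_conn *conn, itt_t itt)
{
	struct iscsi_task *task;

	spin_lock_bh(&session->back_lock);	/* completion-side lock */
	task = iscsi_itt_to_task(conn, itt);
	if (task)
		__iscsi_put_task(task);		/* drop task reference */
	spin_unlock_bh(&session->back_lock);
}

diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c index 34c294b42c8..80c03b452d6 100644 --- a/drivers/scsi/bnx2i/bnx2i_init.c +++ b/drivers/scsi/bnx2i/bnx2i_init.c @@ -537,11 +537,15 @@ static int __init bnx2i_mod_init(void)  		p->iothread = NULL;  	} +	cpu_notifier_register_begin(); +  	for_each_online_cpu(cpu)  		bnx2i_percpu_thread_create(cpu);  	/* Initialize per CPU interrupt thread */ -	register_hotcpu_notifier(&bnx2i_cpu_notifier); +	__register_hotcpu_notifier(&bnx2i_cpu_notifier); + +	cpu_notifier_register_done();  	return 0; @@ -581,11 +585,15 @@ static void __exit bnx2i_mod_exit(void)  	}  	mutex_unlock(&bnx2i_dev_lock); -	unregister_hotcpu_notifier(&bnx2i_cpu_notifier); +	cpu_notifier_register_begin();  	for_each_online_cpu(cpu)  		bnx2i_percpu_thread_destroy(cpu); +	__unregister_hotcpu_notifier(&bnx2i_cpu_notifier); + +	cpu_notifier_register_done(); +  	iscsi_unregister_transport(&bnx2i_iscsi_transport);  	cnic_unregister_driver(CNIC_ULP_ISCSI);  } diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index fabeb88602a..166543f7ef5 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)  	struct iscsi_bd *mp_bdt;  	u64 addr; -	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, +	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  					    &hba->mp_bd_dma, GFP_KERNEL);  	if (!hba->mp_bd_tbl) {  		printk(KERN_ERR "unable to allocate Middle Path BDT\n"); @@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)  		goto out;  	} -	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, +	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, +					       CNIC_PAGE_SIZE,  					       &hba->dummy_buf_dma, GFP_KERNEL);  	if (!hba->dummy_buffer) {  		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n"); -		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				  hba->mp_bd_tbl, hba->mp_bd_dma);  		hba->mp_bd_tbl = NULL;  		rc = -1; @@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)  	addr = (unsigned long) hba->dummy_buf_dma;  	mp_bdt->buffer_addr_lo = addr & 0xffffffff;  	mp_bdt->buffer_addr_hi = addr >> 32; -	mp_bdt->buffer_length = PAGE_SIZE; +	mp_bdt->buffer_length = CNIC_PAGE_SIZE;  	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |  			ISCSI_BD_FIRST_IN_BD_CHAIN;  out: @@ -565,12 +566,12 @@ out:  static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)  {  	if (hba->mp_bd_tbl) { -		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_free_coherent(&hba->pcidev->dev, 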
CNIC_PAGE_SIZE,  				  hba->mp_bd_tbl, hba->mp_bd_dma);  		hba->mp_bd_tbl = NULL;  	}  	if (hba->dummy_buffer) { -		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				  hba->dummy_buffer, hba->dummy_buf_dma);  		hba->dummy_buffer = NULL;  	} @@ -596,7 +597,7 @@ void bnx2i_drop_session(struct iscsi_cls_session *cls_session)  /**   * bnx2i_ep_destroy_list_add - add an entry to EP destroy list   * @hba:	pointer to adapter instance - * @ep:		pointer to endpoint (transport indentifier) structure + * @ep:		pointer to endpoint (transport identifier) structure   *   * EP destroy queue manager   */ @@ -613,7 +614,7 @@ static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,   * bnx2i_ep_destroy_list_del - add an entry to EP destroy list   *   * @hba: 		pointer to adapter instance - * @ep: 		pointer to endpoint (transport indentifier) structure + * @ep: 		pointer to endpoint (transport identifier) structure   *   * EP destroy queue manager   */ @@ -630,7 +631,7 @@ static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,  /**   * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list   * @hba:	pointer to adapter instance - * @ep:		pointer to endpoint (transport indentifier) structure + * @ep:		pointer to endpoint (transport identifier) structure   *   * pending conn offload completion queue manager   */ @@ -646,7 +647,7 @@ static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,  /**   * bnx2i_ep_ofld_list_del - add an entry to ep offload pending list   * @hba: 		pointer to adapter instance - * @ep: 		pointer to endpoint (transport indentifier) structure + * @ep: 		pointer to endpoint (transport identifier) structure   *   * pending conn offload completion queue manager   */ @@ -721,7 +722,7 @@ bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)  /**   * bnx2i_ep_active_list_add - add an entry to ep active list   * @hba:	pointer to adapter instance - * @ep:		pointer to endpoint (transport indentifier) structure + * @ep:		pointer to endpoint (transport identifier) structure   *   * current active conn queue manager   */ @@ -737,7 +738,7 @@ static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,  /**   * bnx2i_ep_active_list_del - deletes an entry to ep active list   * @hba:	pointer to adapter instance - * @ep:		pointer to endpoint (transport indentifier) structure + * @ep:		pointer to endpoint (transport identifier) structure   *   * current active conn queue manager   */ @@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,  					    struct bnx2i_conn *bnx2i_conn)  {  	if (bnx2i_conn->gen_pdu.resp_bd_tbl) { -		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				  bnx2i_conn->gen_pdu.resp_bd_tbl,  				  bnx2i_conn->gen_pdu.resp_bd_dma);  		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;  	}  	if (bnx2i_conn->gen_pdu.req_bd_tbl) { -		dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				  bnx2i_conn->gen_pdu.req_bd_tbl,  				  bnx2i_conn->gen_pdu.req_bd_dma);  		bnx2i_conn->gen_pdu.req_bd_tbl = NULL; @@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,  	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;  	bnx2i_conn->gen_pdu.req_bd_tbl = -		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);  	if 
(bnx2i_conn->gen_pdu.req_bd_tbl == NULL)  		goto login_req_bd_tbl_failure;  	bnx2i_conn->gen_pdu.resp_bd_tbl = -		dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, +		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  				   &bnx2i_conn->gen_pdu.resp_bd_dma,  				   GFP_KERNEL);  	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL) @@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,  	return 0;  login_resp_bd_tbl_failure: -	dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, +	dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,  			  bnx2i_conn->gen_pdu.req_bd_tbl,  			  bnx2i_conn->gen_pdu.req_bd_dma);  	bnx2i_conn->gen_pdu.req_bd_tbl = NULL; @@ -1169,10 +1170,10 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)  	if (task->state == ISCSI_TASK_ABRT_TMF) {  		bnx2i_send_cmd_cleanup_req(hba, task->dd_data); -		spin_unlock_bh(&conn->session->lock); +		spin_unlock_bh(&conn->session->back_lock);  		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,  				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT)); -		spin_lock_bh(&conn->session->lock); +		spin_lock_bh(&conn->session->back_lock);  	}  	bnx2i_iscsi_unmap_sg_list(task->dd_data);  } @@ -1695,7 +1696,7 @@ no_nx2_route:  /**   * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources   * @hba:	pointer to adapter instance - * @ep:		endpoint (transport indentifier) structure + * @ep:		endpoint (transport identifier) structure   *   * destroys cm_sock structure and on chip iscsi context   */ @@ -2059,7 +2060,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)  		goto out;  	if (session) { -		spin_lock_bh(&session->lock); +		spin_lock_bh(&session->frwd_lock);  		if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {  			if (session->state == ISCSI_STATE_LOGGING_OUT) {  				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) { @@ -2075,7 +2076,7 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)  		} else  			close = 1; -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  	}  	bnx2i_ep->state = EP_STATE_DISCONN_START; diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 0eb35b9b378..0eaec474895 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -852,22 +852,6 @@ csio_hw_get_flash_params(struct csio_hw *hw)  	return 0;  } -static void -csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) -{ -	uint16_t val; -	int pcie_cap; - -	if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { -		pci_read_config_word(hw->pdev, -				     pcie_cap + PCI_EXP_DEVCTL2, &val); -		val &= 0xfff0; -		val |= range ; -		pci_write_config_word(hw->pdev, -				      pcie_cap + PCI_EXP_DEVCTL2, val); -	} -} -  /*****************************************************************************/  /* HW State machine assists                                                  */  /*****************************************************************************/ @@ -2069,8 +2053,10 @@ csio_hw_configure(struct csio_hw *hw)  		goto out;  	} -	/* Set pci completion timeout value to 4 seconds. 
*/ -	csio_set_pcie_completion_timeout(hw, 0xd); +	/* Set PCIe completion timeout to 4 seconds */ +	if (pci_is_pcie(hw->pdev)) +		pcie_capability_clear_and_set_word(hw->pdev, PCI_EXP_DEVCTL2, +				PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);  	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR); diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index 00346fe939d..1aafc331ee6 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -1010,7 +1010,6 @@ err_lnode_exit:  	csio_hw_stop(hw);  	spin_unlock_irq(&hw->lock);  	csio_lnodes_unblock_request(hw); -	pci_set_drvdata(hw->pdev, NULL);  	csio_lnodes_exit(hw, 0);  	csio_hw_free(hw);  err_pci_exit: @@ -1044,7 +1043,6 @@ static void csio_remove_one(struct pci_dev *pdev)  	csio_lnodes_exit(hw, 0);  	csio_hw_free(hw); -	pci_set_drvdata(pdev, NULL);  	csio_pci_exit(pdev, &bars);  } 
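
The csiostor hunk above swaps a hand-rolled config-space read-modify-write for the PCI core's pcie_capability_clear_and_set_word(); the 0xd encoding selects the 4 s to 13 s completion-timeout range, hence the "4 seconds" comment. A standalone sketch of the call:

#include <linux/pci.h>

static void set_completion_timeout_4s(struct pci_dev *pdev)
{
	if (!pci_is_pcie(pdev))
		return;
	/* clear the timeout field, then set range encoding 0xd (4 s-13 s) */
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
}

diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 5a9f84238a5..e8ee5e5fe0e 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -175,52 +175,6 @@ static inline int is_ofld_imm(const struct sk_buff *skb)  			sizeof(struct fw_ofld_tx_data_wr));  } - -#define VLAN_NONE 0xfff -#define FILTER_SEL_VLAN_NONE 0xffff -#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */ -#define FILTER_SEL_WIDTH_VIN_P_FC \ -	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/ -#define FILTER_SEL_WIDTH_TAG_P_FC \ -	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */ -#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC) - -static unsigned int select_ntuple(struct cxgbi_device *cdev, -				struct l2t_entry *l2t) -{ -	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); -	unsigned int ntuple = 0; -	u32 viid; - -	switch (lldi->filt_mode) { - -	/* default filter mode */ -	case HW_TPL_FR_MT_PR_IV_P_FC: -		if (l2t->vlan == VLAN_NONE) -			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC; -		else { -			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC; -			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC; -		} -		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << -			  FILTER_SEL_WIDTH_VLD_TAG_P_FC; -		break; -	case HW_TPL_FR_MT_PR_OV_P_FC: { -		viid = cxgb4_port_viid(l2t->neigh->dev); - -		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC; -		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC; -		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC; -		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP << -			  FILTER_SEL_WIDTH_VLD_TAG_P_FC; -		break; -	} -	default: -		break; -	} -	return ntuple; -} -  static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,  				struct l2t_entry *e)  { @@ -248,8 +202,6 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,  		struct cpl_act_open_req *req =  				(struct cpl_act_open_req *)skb->head; -		req = (struct cpl_act_open_req *)skb->head; -  		INIT_TP_WR(req, 0);  		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,  					qid_atid)); @@ -258,7 +210,9 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,  		req->local_ip = csk->saddr.sin_addr.s_addr;  		req->peer_ip = csk->daddr.sin_addr.s_addr;  		req->opt0 = cpu_to_be64(opt0); -		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t)); +		req->params = cpu_to_be32(cxgb4_select_ntuple( +					csk->cdev->ports[csk->port_id], +					csk->l2t));  		opt2 |= 1 << 22;  		req->opt2 = cpu_to_be32(opt2); @@ -271,8 +225,6 @@ static void 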
send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,  		struct cpl_t5_act_open_req *req =  				(struct cpl_t5_act_open_req *)skb->head; -		req = (struct cpl_t5_act_open_req *)skb->head; -  		INIT_TP_WR(req, 0);  		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,  					qid_atid)); @@ -281,7 +233,10 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,  		req->local_ip = csk->saddr.sin_addr.s_addr;  		req->peer_ip = csk->daddr.sin_addr.s_addr;  		req->opt0 = cpu_to_be64(opt0); -		req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t)); +		req->params = cpu_to_be64(V_FILTER_TUPLE( +				cxgb4_select_ntuple( +					csk->cdev->ports[csk->port_id], +					csk->l2t)));  		opt2 |= 1 << 31;  		req->opt2 = cpu_to_be32(opt2); diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 694e13c45df..83d9bf6fa6c 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -308,6 +308,8 @@ struct AdapterCtlBlk {  	struct timer_list waiting_timer;  	struct timer_list selto_timer; +	unsigned long last_reset; +  	u16 srb_count;  	u8 sel_timeout; @@ -860,9 +862,9 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)  	init_timer(&acb->waiting_timer);  	acb->waiting_timer.function = waiting_timeout;  	acb->waiting_timer.data = (unsigned long) acb; -	if (time_before(jiffies + to, acb->scsi_host->last_reset - HZ / 2)) +	if (time_before(jiffies + to, acb->last_reset - HZ / 2))  		acb->waiting_timer.expires = -		    acb->scsi_host->last_reset - HZ / 2 + 1; +		    acb->last_reset - HZ / 2 + 1;  	else  		acb->waiting_timer.expires = jiffies + to + 1;  	add_timer(&acb->waiting_timer); @@ -1319,7 +1321,7 @@ static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)  	udelay(500);  	/* We may be in serious trouble. 
Wait some seconds */ -	acb->scsi_host->last_reset = +	acb->last_reset =  	    jiffies + 3 * HZ / 2 +  	    HZ * acb->eeprom.delay_time; @@ -1462,9 +1464,9 @@ static void selto_timer(struct AdapterCtlBlk *acb)  	acb->selto_timer.function = selection_timeout_missed;  	acb->selto_timer.data = (unsigned long) acb;  	if (time_before -	    (jiffies + HZ, acb->scsi_host->last_reset + HZ / 2)) +	    (jiffies + HZ, acb->last_reset + HZ / 2))  		acb->selto_timer.expires = -		    acb->scsi_host->last_reset + HZ / 2 + 1; +		    acb->last_reset + HZ / 2 + 1;  	else  		acb->selto_timer.expires = jiffies + HZ + 1;  	add_timer(&acb->selto_timer); @@ -1535,7 +1537,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,  	}  	/* Allow starting of SCSI commands half a second before we allow the mid-level  	 * to queue them again after a reset */ -	if (time_before(jiffies, acb->scsi_host->last_reset - HZ / 2)) { +	if (time_before(jiffies, acb->last_reset - HZ / 2)) {  		dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");  		return 1;  	} @@ -3031,7 +3033,7 @@ static void disconnect(struct AdapterCtlBlk *acb)  		dprintkl(KERN_ERR, "disconnect: No such device\n");  		udelay(500);  		/* Suspend queue for a while */ -		acb->scsi_host->last_reset = +		acb->last_reset =  		    jiffies + HZ / 2 +  		    HZ * acb->eeprom.delay_time;  		clear_fifo(acb, "disconnectEx"); @@ -3053,7 +3055,7 @@ static void disconnect(struct AdapterCtlBlk *acb)  		waiting_process_next(acb);  	} else if (srb->state & SRB_ABORT_SENT) {  		dcb->flag &= ~ABORT_DEV_; -		acb->scsi_host->last_reset = jiffies + HZ / 2 + 1; +		acb->last_reset = jiffies + HZ / 2 + 1;  		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");  		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);  		waiting_process_next(acb); @@ -3649,7 +3651,7 @@ static void scsi_reset_detect(struct AdapterCtlBlk *acb)  	/*DC395x_write8(acb, TRM_S1040_DMA_CONTROL,STOPDMAXFER); */  	udelay(500);  	/* Maybe we locked up the bus? Then lets wait even longer ... 
*/ -	acb->scsi_host->last_reset = +	acb->last_reset =  	    jiffies + 5 * HZ / 2 +  	    HZ * acb->eeprom.delay_time; @@ -4426,7 +4428,7 @@ static void adapter_init_scsi_host(struct Scsi_Host *host)  	host->dma_channel = -1;  	host->unique_id = acb->io_port_base;  	host->irq = acb->irq_level; -	host->last_reset = jiffies; +	acb->last_reset = jiffies;  	host->max_id = 16;  	if (host->max_id - 1 == eeprom->scsi_id) @@ -4484,7 +4486,7 @@ static void adapter_init_chip(struct AdapterCtlBlk *acb)  		/*spin_unlock_irq (&io_request_lock); */  		udelay(500); -		acb->scsi_host->last_reset = +		acb->last_reset =  		    jiffies + HZ / 2 +  		    HZ * acb->eeprom.delay_time; @@ -4859,7 +4861,6 @@ static void dc395x_remove_one(struct pci_dev *dev)  	adapter_uninit(acb);  	pci_disable_device(dev);  	scsi_host_put(scsi_host); -	pci_set_drvdata(dev, NULL);  } diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 68adb8955d2..7bcf67eec92 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -120,6 +120,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,  			    "%s: blk_get_request failed\n", __func__);  		return NULL;  	} +	blk_rq_set_block_pc(rq);  	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {  		blk_put_request(rq); @@ -128,7 +129,6 @@ static struct request *get_alua_req(struct scsi_device *sdev,  		return NULL;  	} -	rq->cmd_type = REQ_TYPE_BLOCK_PC;  	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |  			 REQ_FAILFAST_DRIVER;  	rq->retries = ALUA_FAILOVER_RETRIES; @@ -481,6 +481,11 @@ static int alua_check_sense(struct scsi_device *sdev,  			 * Power On, Reset, or Bus Device Reset, just retry.  			 */  			return ADD_TO_MLQUEUE; +		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) +			/* +			 * Device internal reset +			 */ +			return ADD_TO_MLQUEUE;  		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)  			/*  			 * Mode Parameters Changed @@ -517,12 +522,13 @@ static int alua_check_sense(struct scsi_device *sdev,  /*   * alua_rtpg - Evaluate REPORT TARGET GROUP STATES   * @sdev: the device to be evaluated. + * @wait_for_transition: if nonzero, wait ALUA_FAILOVER_TIMEOUT seconds for device to exit transitioning state   *   * Evaluate the Target Port Group State.   * Returns SCSI_DH_DEV_OFFLINED if the path is   * found to be unusable.   
*/ -static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) +static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h, int wait_for_transition)  {  	struct scsi_sense_hdr sense_hdr;  	int len, k, off, valid_states = 0; @@ -594,7 +600,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)  	else  		h->transition_tmo = ALUA_FAILOVER_TIMEOUT; -	if (orig_transition_tmo != h->transition_tmo) { +	if (wait_for_transition && (orig_transition_tmo != h->transition_tmo)) {  		sdev_printk(KERN_INFO, sdev,  			    "%s: transition timeout set to %d seconds\n",  			    ALUA_DH_NAME, h->transition_tmo); @@ -632,14 +638,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)  	switch (h->state) {  	case TPGS_STATE_TRANSITIONING: -		if (time_before(jiffies, expiry)) { -			/* State transition, retry */ -			interval += 2000; -			msleep(interval); -			goto retry; +		if (wait_for_transition) { +			if (time_before(jiffies, expiry)) { +				/* State transition, retry */ +				interval += 2000; +				msleep(interval); +				goto retry; +			} +			err = SCSI_DH_RETRY; +		} else { +			err = SCSI_DH_OK;  		} +  		/* Transitioning time exceeded, set port to standby */ -		err = SCSI_DH_RETRY;  		h->state = TPGS_STATE_STANDBY;  		break;  	case TPGS_STATE_OFFLINE: @@ -673,7 +684,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)  	if (err != SCSI_DH_OK)  		goto out; -	err = alua_rtpg(sdev, h); +	err = alua_rtpg(sdev, h, 0);  	if (err != SCSI_DH_OK)  		goto out; @@ -733,7 +744,7 @@ static int alua_activate(struct scsi_device *sdev,  	int err = SCSI_DH_OK;  	int stpg = 0; -	err = alua_rtpg(sdev, h); +	err = alua_rtpg(sdev, h, 1);  	if (err != SCSI_DH_OK)  		goto out; 
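
The scsi_dh handlers in this series (alua above, emc/hp_sw/rdac below) all switch from assigning rq->cmd_type = REQ_TYPE_BLOCK_PC by hand to calling blk_rq_set_block_pc(), which marks the request as a SCSI passthrough and initializes the related fields in one place. The common request-preparation shape, sketched with an illustrative wrapper name (error handling follows the surrounding code, where blk_get_request() returns NULL on failure):

#include <linux/blkdev.h>

static struct request *prep_dh_request(struct request_queue *q, int rw)
{
	struct request *rq = blk_get_request(q, rw, GFP_NOIO);

	if (!rq)
		return NULL;
	blk_rq_set_block_pc(rq);	/* SCSI passthrough request */
	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			 REQ_FAILFAST_DRIVER;
	return rq;
}

diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c index e1c8be06de9..6f07f7fe3aa 100644 --- a/drivers/scsi/device_handler/scsi_dh_emc.c +++ b/drivers/scsi/device_handler/scsi_dh_emc.c @@ -280,6 +280,7 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,  		return NULL;  	} +	blk_rq_set_block_pc(rq);  	rq->cmd_len = COMMAND_SIZE(cmd);  	rq->cmd[0] = cmd; @@ -304,7 +305,6 @@ static struct request *get_req(struct scsi_device *sdev, int cmd,  		break;  	} -	rq->cmd_type = REQ_TYPE_BLOCK_PC;  	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |  			 REQ_FAILFAST_DRIVER;  	rq->timeout = CLARIION_TIMEOUT; diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c index 084062bb8ee..e9d9fea9e27 100644 --- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c +++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c @@ -120,7 +120,7 @@ retry:  	if (!req)  		return SCSI_DH_RES_TEMP_UNAVAIL; -	req->cmd_type = REQ_TYPE_BLOCK_PC; +	blk_rq_set_block_pc(req);  	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |  			  REQ_FAILFAST_DRIVER;  	req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); @@ -250,7 +250,7 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)  	if (!req)  		return SCSI_DH_RES_TEMP_UNAVAIL; -	req->cmd_type = REQ_TYPE_BLOCK_PC; +	blk_rq_set_block_pc(req);  	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |  			  REQ_FAILFAST_DRIVER;  	req->cmd_len = COMMAND_SIZE(START_STOP); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 69c915aa77c..826069db984 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -279,6 +279,7 @@ static struct 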
request *get_rdac_req(struct scsi_device *sdev,  				"get_rdac_req: blk_get_request failed.\n");  		return NULL;  	} +	blk_rq_set_block_pc(rq);  	if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) {  		blk_put_request(rq); @@ -287,7 +288,6 @@ static struct request *get_rdac_req(struct scsi_device *sdev,  		return NULL;  	} -	rq->cmd_type = REQ_TYPE_BLOCK_PC;  	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |  			 REQ_FAILFAST_DRIVER;  	rq->retries = RDAC_RETRIES; @@ -786,6 +786,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {  	{"IBM", "1742"},  	{"IBM", "1745"},  	{"IBM", "1746"}, +	{"IBM", "1813"},  	{"IBM", "1814"},  	{"IBM", "1815"},  	{"IBM", "1818"}, diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 19e1b422260..c0ae8fa57a3 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -448,19 +448,8 @@ static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd  	}  	rmb(); -	/* -	 * TODO: I need to block here if I am processing ioctl cmds -	 * but if the outstanding cmds all finish before the ioctl, -	 * the scsi-core will not know to start sending cmds to me again. -	 * I need to a way to restart the scsi-cores queues or should I block -	 * calling scsi_done on the outstanding cmds instead -	 * for now we don't set the IOCTL state -	 */ -	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) { -		pHba->host->last_reset = jiffies; -		pHba->host->resetting = 1; -		return 1; -	} +	if ((pHba->state) & DPTI_STATE_RESET) +		return SCSI_MLQUEUE_HOST_BUSY;  	// TODO if the cmd->device if offline then I may need to issue a bus rescan  	// followed by a get_lct to see if the device is there anymore @@ -1811,21 +1800,23 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)  	}  	do { -		if(pHba->host) +		/* +		 * Stop any new commands from entering the +		 * controller while processing the ioctl +		 */ +		if (pHba->host) { +			scsi_block_requests(pHba->host);  			spin_lock_irqsave(pHba->host->host_lock, flags); -		// This state stops any new commands from enterring the -		// controller while processing the ioctl -//		pHba->state |= DPTI_STATE_IOCTL; -//		We can't set this now - The scsi subsystem sets host_blocked and -//		the queue empties and stops.  
We need a way to restart the queue +		}  		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);  		if (rcode != 0)  			printk("adpt_i2o_passthru: post wait failed %d %p\n",  					rcode, reply); -//		pHba->state &= ~DPTI_STATE_IOCTL; -		if(pHba->host) +		if (pHba->host) {  			spin_unlock_irqrestore(pHba->host->host_lock, flags); -	} while(rcode == -ETIMEDOUT);   +			scsi_unblock_requests(pHba->host); +		} +	} while (rcode == -ETIMEDOUT);  	if(rcode){  		goto cleanup; diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index beded716f93..aeb046186c8 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h @@ -202,7 +202,6 @@ struct adpt_channel {  // HBA state flags  #define DPTI_STATE_RESET	(0x01) -#define DPTI_STATE_IOCTL	(0x02)  typedef struct _adpt_hba {  	struct _adpt_hba *next; diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c index d01f0160414..0a667fe0500 100644 --- a/drivers/scsi/dtc.c +++ b/drivers/scsi/dtc.c @@ -3,8 +3,6 @@  #define PSEUDO_DMA  #define DONT_USE_INTR  #define UNSAFE			/* Leave interrupts enabled during pseudo-dma I/O */ -#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\ -		 NDEBUG_SELECTION+NDEBUG_ARBITRATION)  #define DMA_WORKS_RIGHT @@ -277,7 +275,7 @@ found:  		/* With interrupts enabled, it will sometimes hang when doing heavy  		 * reads. So better not enable them until I finger it out. */  		if (instance->irq != SCSI_IRQ_NONE) -			if (request_irq(instance->irq, dtc_intr, IRQF_DISABLED, +			if (request_irq(instance->irq, dtc_intr, 0,  					"dtc", instance)) {  				printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);  				instance->irq = SCSI_IRQ_NONE; diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index 94de88955a9..ebf57364df9 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -1221,7 +1221,7 @@ static int port_detect(unsigned long port_base, unsigned int j,  	/* Board detected, allocate its IRQ */  	if (request_irq(irq, do_interrupt_handler, -			IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0), +			(subversion == ESA) ? IRQF_SHARED : 0,  			driver_name, (void *)&sha[j])) {  		printk("%s: unable to allocate IRQ %u, detaching.\n", name,  		       irq); diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index 1663173cdb9..8319d2b417b 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c @@ -687,7 +687,7 @@ static int register_pio_HBA(long base, struct get_conf *gc, struct pci_dev *pdev  		return 0;  	if (!reg_IRQ[gc->IRQ]) {	/* Interrupt already registered ? 
*/ -		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", sh)) { +		if (!request_irq(gc->IRQ, do_eata_pio_int_handler, 0, "EATA-PIO", sh)) {  			reg_IRQ[gc->IRQ]++;  			if (!gc->IRQ_TR)  				reg_IRQL[gc->IRQ] = 1;	/* IRQ is edge triggered */ @@ -921,7 +921,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)  	for (i = 0; i < MAXIRQ; i++)  		if (reg_IRQ[i]) -			request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL); +			request_irq(i, do_eata_pio_int_handler, 0, "EATA-PIO", NULL);  	HBA_ptr = first_HBA; diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h index 0838e265e0b..3fd305d6b67 100644 --- a/drivers/scsi/esas2r/esas2r.h +++ b/drivers/scsi/esas2r/esas2r.h @@ -799,47 +799,47 @@ struct esas2r_adapter {  	struct esas2r_target *targetdb_end;  	unsigned char *regs;  	unsigned char *data_window; -	u32 volatile flags; -	#define AF_PORT_CHANGE      (u32)(0x00000001) -	#define AF_CHPRST_NEEDED    (u32)(0x00000004) -	#define AF_CHPRST_PENDING   (u32)(0x00000008) -	#define AF_CHPRST_DETECTED  (u32)(0x00000010) -	#define AF_BUSRST_NEEDED    (u32)(0x00000020) -	#define AF_BUSRST_PENDING   (u32)(0x00000040) -	#define AF_BUSRST_DETECTED  (u32)(0x00000080) -	#define AF_DISABLED         (u32)(0x00000100) -	#define AF_FLASH_LOCK       (u32)(0x00000200) -	#define AF_OS_RESET         (u32)(0x00002000) -	#define AF_FLASHING         (u32)(0x00004000) -	#define AF_POWER_MGT        (u32)(0x00008000) -	#define AF_NVR_VALID        (u32)(0x00010000) -	#define AF_DEGRADED_MODE    (u32)(0x00020000) -	#define AF_DISC_PENDING     (u32)(0x00040000) -	#define AF_TASKLET_SCHEDULED    (u32)(0x00080000) -	#define AF_HEARTBEAT        (u32)(0x00200000) -	#define AF_HEARTBEAT_ENB    (u32)(0x00400000) -	#define AF_NOT_PRESENT      (u32)(0x00800000) -	#define AF_CHPRST_STARTED   (u32)(0x01000000) -	#define AF_FIRST_INIT       (u32)(0x02000000) -	#define AF_POWER_DOWN       (u32)(0x04000000) -	#define AF_DISC_IN_PROG     (u32)(0x08000000) -	#define AF_COMM_LIST_TOGGLE (u32)(0x10000000) -	#define AF_LEGACY_SGE_MODE  (u32)(0x20000000) -	#define AF_DISC_POLLED      (u32)(0x40000000) -	u32 volatile flags2; -	#define AF2_SERIAL_FLASH    (u32)(0x00000001) -	#define AF2_DEV_SCAN        (u32)(0x00000002) -	#define AF2_DEV_CNT_OK      (u32)(0x00000004) -	#define AF2_COREDUMP_AVAIL  (u32)(0x00000008) -	#define AF2_COREDUMP_SAVED  (u32)(0x00000010) -	#define AF2_VDA_POWER_DOWN  (u32)(0x00000100) -	#define AF2_THUNDERLINK     (u32)(0x00000200) -	#define AF2_THUNDERBOLT     (u32)(0x00000400) -	#define AF2_INIT_DONE       (u32)(0x00000800) -	#define AF2_INT_PENDING     (u32)(0x00001000) -	#define AF2_TIMER_TICK      (u32)(0x00002000) -	#define AF2_IRQ_CLAIMED     (u32)(0x00004000) -	#define AF2_MSI_ENABLED     (u32)(0x00008000) +	long flags; +	#define AF_PORT_CHANGE      0 +	#define AF_CHPRST_NEEDED    1 +	#define AF_CHPRST_PENDING   2 +	#define AF_CHPRST_DETECTED  3 +	#define AF_BUSRST_NEEDED    4 +	#define AF_BUSRST_PENDING   5 +	#define AF_BUSRST_DETECTED  6 +	#define AF_DISABLED         7 +	#define AF_FLASH_LOCK       8 +	#define AF_OS_RESET         9 +	#define AF_FLASHING         10 +	#define AF_POWER_MGT        11 +	#define AF_NVR_VALID        12 +	#define AF_DEGRADED_MODE    13 +	#define AF_DISC_PENDING     14 +	#define AF_TASKLET_SCHEDULED    15 +	#define AF_HEARTBEAT        16 +	#define AF_HEARTBEAT_ENB    17 +	#define AF_NOT_PRESENT      18 +	#define AF_CHPRST_STARTED   19 +	#define AF_FIRST_INIT       20 +	#define AF_POWER_DOWN       21 +	#define 
AF_DISC_IN_PROG     22 +	#define AF_COMM_LIST_TOGGLE 23 +	#define AF_LEGACY_SGE_MODE  24 +	#define AF_DISC_POLLED      25 +	long flags2; +	#define AF2_SERIAL_FLASH    0 +	#define AF2_DEV_SCAN        1 +	#define AF2_DEV_CNT_OK      2 +	#define AF2_COREDUMP_AVAIL  3 +	#define AF2_COREDUMP_SAVED  4 +	#define AF2_VDA_POWER_DOWN  5 +	#define AF2_THUNDERLINK     6 +	#define AF2_THUNDERBOLT     7 +	#define AF2_INIT_DONE       8 +	#define AF2_INT_PENDING     9 +	#define AF2_TIMER_TICK      10 +	#define AF2_IRQ_CLAIMED     11 +	#define AF2_MSI_ENABLED     12  	atomic_t disable_cnt;  	atomic_t dis_ints_cnt;  	u32 int_stat; @@ -1150,16 +1150,6 @@ void esas2r_queue_fw_event(struct esas2r_adapter *a,  			   int data_sz);  /* Inline functions */ -static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits) -{ -	return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags); -} - -static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits) -{ -	return test_and_clear_bit(ilog2(bits), -				  (volatile unsigned long *)flags); -}  /* Allocate a chip scatter/gather list entry */  static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a) @@ -1217,7 +1207,6 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,  					  struct esas2r_adapter *a)  {  	union atto_vda_req *vrq = rq->vrq; -	u32 handle;  	INIT_LIST_HEAD(&rq->sg_table_head);  	rq->data_buf = (void *)(vrq + 1); @@ -1253,11 +1242,9 @@ static inline void esas2r_rq_init_request(struct esas2r_request *rq,  	/*  	 * add a reference number to the handle to make it unique (until it -	 * wraps of course) while preserving the upper word +	 * wraps of course) while preserving the least significant word  	 */ - -	handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000; -	vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++); +	vrq->scsi.handle = (a->cmd_ref_no++ << 16) | (u16)vrq->scsi.handle;  	/*  	 * the following formats a SCSI request.  the caller can override as @@ -1303,10 +1290,13 @@ static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,  static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)  { -	return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED -			    | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED -			    | AF_PORT_CHANGE)) -	       ? 
true : false; + +	return test_bit(AF_BUSRST_NEEDED, &a->flags) || +	       test_bit(AF_BUSRST_DETECTED, &a->flags) || +	       test_bit(AF_CHPRST_NEEDED, &a->flags) || +	       test_bit(AF_CHPRST_DETECTED, &a->flags) || +	       test_bit(AF_PORT_CHANGE, &a->flags); +  }  /* @@ -1345,24 +1335,24 @@ static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)  static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)  {  	/* make sure we don't schedule twice */ -	if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) & -	      ilog2(AF_TASKLET_SCHEDULED))) +	if (!test_and_set_bit(AF_TASKLET_SCHEDULED, &a->flags))  		tasklet_hi_schedule(&a->tasklet);  }  static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)  { -	if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING)) -	    && (a->nvram->options2 & SASNVR2_HEARTBEAT)) -		esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB); +	if (!test_bit(AF_DEGRADED_MODE, &a->flags) && +	    !test_bit(AF_CHPRST_PENDING, &a->flags) && +	    (a->nvram->options2 & SASNVR2_HEARTBEAT)) +		set_bit(AF_HEARTBEAT_ENB, &a->flags);  	else -		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); +		clear_bit(AF_HEARTBEAT_ENB, &a->flags);  }  static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)  { -	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB); -	esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); +	clear_bit(AF_HEARTBEAT_ENB, &a->flags); +	clear_bit(AF_HEARTBEAT, &a->flags);  }  /* Set the initial state for resetting the adapter on the next pass through @@ -1372,9 +1362,9 @@ static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)  {  	esas2r_disable_heartbeat(a); -	esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED); -	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); -	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); +	set_bit(AF_CHPRST_NEEDED, &a->flags); +	set_bit(AF_CHPRST_PENDING, &a->flags); +	set_bit(AF_DISC_PENDING, &a->flags);  }  /* See if an interrupt is pending on the adapter. */ diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c index dec6c334ce3..1c079f4300a 100644 --- a/drivers/scsi/esas2r/esas2r_disc.c +++ b/drivers/scsi/esas2r/esas2r_disc.c @@ -86,9 +86,9 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)  	esas2r_trace_enter(); -	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); -	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN); -	esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK); +	clear_bit(AF_DISC_IN_PROG, &a->flags); +	clear_bit(AF2_DEV_SCAN, &a->flags2); +	clear_bit(AF2_DEV_CNT_OK, &a->flags2);  	a->disc_start_time = jiffies_to_msecs(jiffies);  	a->disc_wait_time = nvr->dev_wait_time * 1000; @@ -107,7 +107,8 @@ void esas2r_disc_initialize(struct esas2r_adapter *a)  	a->general_req.interrupt_cx = NULL; -	if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) { +	if (test_bit(AF_CHPRST_DETECTED, &a->flags) || +	    test_bit(AF_POWER_MGT, &a->flags)) {  		if (a->prev_dev_cnt == 0) {  			/* Don't bother waiting if there is nothing to wait  			 * for. @@ -212,9 +213,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)  			|| a->disc_wait_cnt == 0)) {  			/* After three seconds of waiting, schedule a scan. 
*/  			if (time >= 3000 -			    && !(esas2r_lock_set_flags(&a->flags2, -						       AF2_DEV_SCAN) & -				 ilog2(AF2_DEV_SCAN))) { +			    && !test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {  				spin_lock_irqsave(&a->mem_lock, flags);  				esas2r_disc_queue_event(a, DCDE_DEV_SCAN);  				spin_unlock_irqrestore(&a->mem_lock, flags); @@ -228,18 +227,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)  		 * We are done waiting...we think.  Adjust the wait time to  		 * consume events after the count is met.  		 */ -		if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK) -		      & ilog2(AF2_DEV_CNT_OK))) +		if (!test_and_set_bit(AF2_DEV_CNT_OK, &a->flags2))  			a->disc_wait_time = time + 3000;  		/* If we haven't done a full scan yet, do it now. */ -		if (!(esas2r_lock_set_flags(&a->flags2, -					    AF2_DEV_SCAN) & -		      ilog2(AF2_DEV_SCAN))) { +		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {  			spin_lock_irqsave(&a->mem_lock, flags);  			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);  			spin_unlock_irqrestore(&a->mem_lock, flags); -  			esas2r_trace_exit();  			return;  		} @@ -253,9 +248,7 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)  			return;  		}  	} else { -		if (!(esas2r_lock_set_flags(&a->flags2, -					    AF2_DEV_SCAN) & -		      ilog2(AF2_DEV_SCAN))) { +		if (!test_and_set_bit(AF2_DEV_SCAN, &a->flags2)) {  			spin_lock_irqsave(&a->mem_lock, flags);  			esas2r_disc_queue_event(a, DCDE_DEV_SCAN);  			spin_unlock_irqrestore(&a->mem_lock, flags); @@ -265,8 +258,8 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)  	/* We want to stop waiting for devices. */  	a->disc_wait_time = 0; -	if ((a->flags & AF_DISC_POLLED) -	    && (a->flags & AF_DISC_IN_PROG)) { +	if (test_bit(AF_DISC_POLLED, &a->flags) && +	    test_bit(AF_DISC_IN_PROG, &a->flags)) {  		/*  		 * Polled discovery is still pending so continue the active  		 * discovery until it is done.  At that point, we will stop @@ -280,14 +273,14 @@ void esas2r_disc_check_complete(struct esas2r_adapter *a)  		 * driven; i.e. There is no transition.  		 */  		esas2r_disc_fix_curr_requests(a); -		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); +		clear_bit(AF_DISC_PENDING, &a->flags);  		/*  		 * We have deferred target state changes until now because we  		 * don't want to report any removals (due to the first arrival)  		 * until the device wait time expires.  		 */ -		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); +		set_bit(AF_PORT_CHANGE, &a->flags);  	}  	esas2r_trace_exit(); @@ -308,7 +301,8 @@ void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)  	 * Don't start discovery before or during polled discovery.  if we did,  	 * we would have a deadlock if we are in the ISR already.  	 */ -	if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED))) +	if (!test_bit(AF_CHPRST_PENDING, &a->flags) && +	    !test_bit(AF_DISC_POLLED, &a->flags))  		esas2r_disc_start_port(a);  	esas2r_trace_exit(); @@ -322,7 +316,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)  	esas2r_trace_enter(); -	if (a->flags & AF_DISC_IN_PROG) { +	if (test_bit(AF_DISC_IN_PROG, &a->flags)) {  		esas2r_trace_exit();  		return false; @@ -330,7 +324,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)  	/* If there is a discovery waiting, process it. 
*/  	if (dc->disc_evt) { -		if ((a->flags & AF_DISC_POLLED) +		if (test_bit(AF_DISC_POLLED, &a->flags)  		    && a->disc_wait_time == 0) {  			/*  			 * We are doing polled discovery, but we no longer want @@ -347,7 +341,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)  		esas2r_hdebug("disc done"); -		esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE); +		set_bit(AF_PORT_CHANGE, &a->flags);  		esas2r_trace_exit(); @@ -356,10 +350,10 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)  	/* Handle the discovery context */  	esas2r_trace("disc_evt: %d", dc->disc_evt); -	esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG); +	set_bit(AF_DISC_IN_PROG, &a->flags);  	dc->flags = 0; -	if (a->flags & AF_DISC_POLLED) +	if (test_bit(AF_DISC_POLLED, &a->flags))  		dc->flags |= DCF_POLLED;  	rq->interrupt_cx = dc; @@ -379,7 +373,7 @@ bool esas2r_disc_start_port(struct esas2r_adapter *a)  	}  	/* Continue interrupt driven discovery */ -	if (!(a->flags & AF_DISC_POLLED)) +	if (!test_bit(AF_DISC_POLLED, &a->flags))  		ret = esas2r_disc_continue(a, rq);  	else  		ret = true; @@ -453,10 +447,10 @@ static bool esas2r_disc_continue(struct esas2r_adapter *a,  	/* Discovery is done...for now. */  	rq->interrupt_cx = NULL; -	if (!(a->flags & AF_DISC_PENDING)) +	if (!test_bit(AF_DISC_PENDING, &a->flags))  		esas2r_disc_fix_curr_requests(a); -	esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); +	clear_bit(AF_DISC_IN_PROG, &a->flags);  	/* Start the next discovery. */  	return esas2r_disc_start_port(a); @@ -480,7 +474,8 @@ static bool esas2r_disc_start_request(struct esas2r_adapter *a,  	spin_lock_irqsave(&a->queue_lock, flags); -	if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING))) +	if (!test_bit(AF_CHPRST_PENDING, &a->flags) && +	    !test_bit(AF_FLASHING, &a->flags))  		esas2r_disc_local_start_request(a, rq);  	else  		list_add_tail(&rq->req_list, &a->defer_list); diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c index 2ec3c23275b..b7dc59fca7a 100644 --- a/drivers/scsi/esas2r/esas2r_flash.c +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -231,7 +231,7 @@ static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)  	 * RS_PENDING, FM API tasks will continue.  	 */  	rq->req_stat = RS_PENDING; -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		/* not suppported for now */;  	else  		build_flash_msg(a, rq); @@ -315,7 +315,7 @@ static bool complete_fmapi_req(struct esas2r_adapter *a,  		memset(fc->scratch, 0, FM_BUF_SZ);  	esas2r_enable_heartbeat(a); -	esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK); +	clear_bit(AF_FLASH_LOCK, &a->flags);  	return false;  } @@ -526,7 +526,7 @@ no_cfg:  			 * The download is complete.  If in degraded mode,  			 * attempt a chip reset.  			 */ -			if (a->flags & AF_DEGRADED_MODE) +			if (test_bit(AF_DEGRADED_MODE, &a->flags))  				esas2r_local_reset_adapter(a);  			a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version; @@ -890,7 +890,7 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,  		}  	} -	if (a->flags & AF_DEGRADED_MODE) { +	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {  		fs->status = ATTO_STS_DEGRADED;  		return false;  	} @@ -945,8 +945,12 @@ static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)  	/* Now wait for the firmware to process it */  	starttime = jiffies_to_msecs(jiffies); -	timeout = a->flags & -		  (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 
40000 : 5000; + +	if (test_bit(AF_CHPRST_PENDING, &a->flags) || +	    test_bit(AF_DISC_PENDING, &a->flags)) +		timeout = 40000; +	else +		timeout = 5000;  	while (true) {  		intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT); @@ -1008,7 +1012,7 @@ bool esas2r_read_flash_block(struct esas2r_adapter *a,  		u32 offset;  		u32 iatvr; -		if (a->flags2 & AF2_SERIAL_FLASH) +		if (test_bit(AF2_SERIAL_FLASH, &a->flags2))  			iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);  		else  			iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE); @@ -1236,9 +1240,9 @@ static void esas2r_nvram_callback(struct esas2r_adapter *a,  	if (rq->req_stat != RS_PENDING) {  		/* update the NVRAM state */  		if (rq->req_stat == RS_SUCCESS) -			esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); +			set_bit(AF_NVR_VALID, &a->flags);  		else -			esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); +			clear_bit(AF_NVR_VALID, &a->flags);  		esas2r_enable_heartbeat(a); @@ -1258,7 +1262,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,  	u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];  	struct atto_vda_flash_req *vrq = &rq->vrq->flash; -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		return false;  	if (down_interruptible(&a->nvram_semaphore)) @@ -1302,7 +1306,7 @@ bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,  			       FLS_OFFSET_NVR,  			       sizeof(struct esas2r_sas_nvram)); -	if (a->flags & AF_LEGACY_SGE_MODE) { +	if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {  		vrq->data.sge[0].length =  			cpu_to_le32(SGE_LAST | @@ -1337,7 +1341,7 @@ bool esas2r_nvram_validate(struct esas2r_adapter *a)  	} else if (n->version > SASNVR_VERSION) {  		esas2r_hdebug("invalid NVRAM version");  	} else { -		esas2r_lock_set_flags(&a->flags, AF_NVR_VALID); +		set_bit(AF_NVR_VALID, &a->flags);  		rslt = true;  	} @@ -1359,7 +1363,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a)  	struct esas2r_sas_nvram *n = a->nvram;  	u32 time = jiffies_to_msecs(jiffies); -	esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); +	clear_bit(AF_NVR_VALID, &a->flags);  	*n = default_sas_nvram;  	n->sas_addr[3] |= 0x0F;  	n->sas_addr[4] = HIBYTE(LOWORD(time)); @@ -1389,7 +1393,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,  	u8 j;  	struct esas2r_component_header *ch; -	if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) { +	if (test_and_set_bit(AF_FLASH_LOCK, &a->flags)) {  		/* flag was already set */  		fi->status = FI_STAT_BUSY;  		return false; @@ -1413,7 +1417,7 @@ bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,  		return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);  	} -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);  	switch (fi->action) { diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index da1869df240..6776931e25d 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -216,7 +216,7 @@ use_legacy_interrupts:  			goto use_legacy_interrupts;  		}  		a->intr_mode = INTR_MODE_MSI; -		esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED); +		set_bit(AF2_MSI_ENABLED, &a->flags2);  		break; @@ -231,7 +231,7 @@ use_legacy_interrupts:  static void esas2r_claim_interrupts(struct esas2r_adapter *a)  { -	unsigned long flags = IRQF_DISABLED; +	unsigned long flags = 0;  	if (a->intr_mode == INTR_MODE_LEGACY)  		flags 
|= IRQF_SHARED; @@ -252,7 +252,7 @@ static void esas2r_claim_interrupts(struct esas2r_adapter *a)  		return;  	} -	esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED); +	set_bit(AF2_IRQ_CLAIMED, &a->flags2);  	esas2r_log(ESAS2R_LOG_INFO,  		   "claimed IRQ %d flags: 0x%lx",  		   a->pcid->irq, flags); @@ -380,10 +380,10 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,  	/* interrupts will be disabled until we are done with init */  	atomic_inc(&a->dis_ints_cnt);  	atomic_inc(&a->disable_cnt); -	a->flags |= AF_CHPRST_PENDING -		    | AF_DISC_PENDING -		    | AF_FIRST_INIT -		    | AF_LEGACY_SGE_MODE; +	set_bit(AF_CHPRST_PENDING, &a->flags); +	set_bit(AF_DISC_PENDING, &a->flags); +	set_bit(AF_FIRST_INIT, &a->flags); +	set_bit(AF_LEGACY_SGE_MODE, &a->flags);  	a->init_msg = ESAS2R_INIT_MSG_START;  	a->max_vdareq_size = 128; @@ -440,11 +440,11 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,  	esas2r_claim_interrupts(a); -	if (a->flags2 & AF2_IRQ_CLAIMED) +	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2))  		esas2r_enable_chip_interrupts(a); -	esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE); -	if (!(a->flags & AF_DEGRADED_MODE)) +	set_bit(AF2_INIT_DONE, &a->flags2); +	if (!test_bit(AF_DEGRADED_MODE, &a->flags))  		esas2r_kickoff_timer(a);  	esas2r_debug("esas2r_init_adapter done for %p (%d)",  		     a, a->disable_cnt); @@ -457,8 +457,8 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,  {  	struct esas2r_mem_desc *memdesc, *next; -	if ((a->flags2 & AF2_INIT_DONE) -	    &&  (!(a->flags & AF_DEGRADED_MODE))) { +	if ((test_bit(AF2_INIT_DONE, &a->flags2)) +	    &&  (!test_bit(AF_DEGRADED_MODE, &a->flags))) {  		if (!power_management) {  			del_timer_sync(&a->timer);  			tasklet_kill(&a->tasklet); @@ -508,19 +508,19 @@ static void esas2r_adapter_power_down(struct esas2r_adapter *a,  	}  	/* Clean up interrupts */ -	if (a->flags2 & AF2_IRQ_CLAIMED) { +	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {  		esas2r_log_dev(ESAS2R_LOG_INFO,  			       &(a->pcid->dev),  			       "free_irq(%d) called", a->pcid->irq);  		free_irq(a->pcid->irq, a);  		esas2r_debug("IRQ released"); -		esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED); +		clear_bit(AF2_IRQ_CLAIMED, &a->flags2);  	} -	if (a->flags2 & AF2_MSI_ENABLED) { +	if (test_bit(AF2_MSI_ENABLED, &a->flags2)) {  		pci_disable_msi(a->pcid); -		esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED); +		clear_bit(AF2_MSI_ENABLED, &a->flags2);  		esas2r_debug("MSI disabled");  	} @@ -641,12 +641,10 @@ void esas2r_kill_adapter(int i)  		pci_set_drvdata(a->pcid, NULL);  		esas2r_adapters[i] = NULL; -		if (a->flags2 & AF2_INIT_DONE) { -			esas2r_lock_clear_flags(&a->flags2, -						AF2_INIT_DONE); +		if (test_bit(AF2_INIT_DONE, &a->flags2)) { +			clear_bit(AF2_INIT_DONE, &a->flags2); -			esas2r_lock_set_flags(&a->flags, -					      AF_DEGRADED_MODE); +			set_bit(AF_DEGRADED_MODE, &a->flags);  			esas2r_log_dev(ESAS2R_LOG_INFO,  				       &(a->host->shost_gendev), @@ -759,7 +757,7 @@ int esas2r_resume(struct pci_dev *pdev)  	esas2r_claim_interrupts(a); -	if (a->flags2 & AF2_IRQ_CLAIMED) { +	if (test_bit(AF2_IRQ_CLAIMED, &a->flags2)) {  		/*  		 * Now that system interrupt(s) are claimed, we can enable  		 * chip interrupts. 
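
The esas2r hunks above and below all apply one mechanical conversion: the adapter's volatile u32 flag words, whose AF_*/AF2_* constants were single-bit masks driven through the driver-private esas2r_lock_set_flags()/esas2r_lock_clear_flags() wrappers, become long bitmaps whose constants are bit numbers, driven by the kernel's standard atomic bitops (set_bit(), clear_bit(), test_bit(), test_and_set_bit()). The old wrappers passed ilog2(mask) to the bitops, which is only correct for single-bit masks, and some callers masked the 0/1 return value against the full mask value (the old AF_FLASH_LOCK busy check in esas2r_fm_api() above appears never to have fired for that reason), so the conversion also removes latent bugs; compound tests such as a->flags & (X | Y) have to be expanded into one test_bit() per flag, as the hunks do. A minimal sketch of the new pattern, using hypothetical EXAMPLE_* names rather than anything from the driver:

        #include <linux/bitops.h>

        /* Flag constants are bit numbers, not masks: bit 1 is mask 0x2. */
        #define EXAMPLE_RESET_PENDING   0
        #define EXAMPLE_WORK_SCHEDULED  1

        struct example_adapter {
                unsigned long flags;    /* the bitops operate on unsigned long */
        };

        static void example_schedule_work(struct example_adapter *a)
        {
                /*
                 * test_and_set_bit() atomically sets the bit and returns its
                 * previous value, so only the first caller gets to schedule
                 * the (hypothetical) deferred work.
                 */
                if (!test_and_set_bit(EXAMPLE_WORK_SCHEDULED, &a->flags))
                        ; /* tasklet_hi_schedule(&a->tasklet) would go here */
        }

        static void example_work_done(struct example_adapter *a)
        {
                clear_bit(EXAMPLE_WORK_SCHEDULED, &a->flags);

                /* Compound mask tests become one test_bit() per flag. */
                if (test_bit(EXAMPLE_RESET_PENDING, &a->flags))
                        ; /* kick off the deferred reset here */
        }
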
@@ -781,7 +779,7 @@ error_exit:  bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)  { -	esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); +	set_bit(AF_DEGRADED_MODE, &a->flags);  	esas2r_log(ESAS2R_LOG_CRIT,  		   "setting adapter to degraded mode: %s\n", error_str);  	return false; @@ -809,7 +807,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)  	int pcie_cap_reg;  	pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); -	if (0xffff & pcie_cap_reg) { +	if (pcie_cap_reg) {  		u16 devcontrol;  		pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, @@ -896,7 +894,7 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,  	    && (a->pcid->subsystem_device & ATTO_SSDID_TBT))  		a->flags2 |= AF2_THUNDERBOLT; -	if (a->flags2 & AF2_THUNDERBOLT) +	if (test_bit(AF2_THUNDERBOLT, &a->flags2))  		a->flags2 |= AF2_SERIAL_FLASH;  	if (a->pcid->subsystem_device == ATTO_TLSH_1068) @@ -956,14 +954,14 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,  	a->outbound_copy = (u32 volatile *)high;  	high += sizeof(u32); -	if (!(a->flags & AF_NVR_VALID)) +	if (!test_bit(AF_NVR_VALID, &a->flags))  		esas2r_nvram_set_defaults(a);  	/* update the caller's uncached memory area pointer */  	*uncached_area = (void *)high;  	/* initialize the allocated memory */ -	if (a->flags & AF_FIRST_INIT) { +	if (test_bit(AF_FIRST_INIT, &a->flags)) {  		memset(a->req_table, 0,  		       (num_requests + num_ae_requests +  			1) * sizeof(struct esas2r_request *)); @@ -1019,7 +1017,7 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)  	 * if the chip reset detected flag is set, we can bypass a bunch of  	 * stuff.  	 */ -	if (a->flags & AF_CHPRST_DETECTED) +	if (test_bit(AF_CHPRST_DETECTED, &a->flags))  		goto skip_chip_reset;  	/* @@ -1057,14 +1055,12 @@ bool esas2r_check_adapter(struct esas2r_adapter *a)  						    doorbell);  			if (ver == DRBL_FW_VER_0) { -				esas2r_lock_set_flags(&a->flags, -						      AF_LEGACY_SGE_MODE); +				set_bit(AF_LEGACY_SGE_MODE, &a->flags);  				a->max_vdareq_size = 128;  				a->build_sgl = esas2r_build_sg_list_sge;  			} else if (ver == DRBL_FW_VER_1) { -				esas2r_lock_clear_flags(&a->flags, -							AF_LEGACY_SGE_MODE); +				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);  				a->max_vdareq_size = 1024;  				a->build_sgl = esas2r_build_sg_list_prd; @@ -1139,7 +1135,7 @@ skip_chip_reset:  	*a->outbound_copy =  		a->last_write =  			a->last_read = a->list_size - 1; -	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); +	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);  	esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |  				    a->last_write);  	esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE | @@ -1204,9 +1200,9 @@ skip_chip_reset:  	 */  	doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);  	if (doorbell & DRBL_POWER_DOWN) -		esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN); +		set_bit(AF2_VDA_POWER_DOWN, &a->flags2);  	else -		esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN); +		clear_bit(AF2_VDA_POWER_DOWN, &a->flags2);  	/*  	 * enable assertion of outbound queue and doorbell interrupts in the @@ -1239,8 +1235,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,  				     0,  				     NULL);  		ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init; -		ci->sgl_page_size = sgl_page_size; -		ci->epoch_time = now.tv_sec; +		ci->sgl_page_size = cpu_to_le32(sgl_page_size); +		ci->epoch_time = cpu_to_le32(now.tv_sec);  		rq->flags |= RF_FAILURE_OK;  		a->init_msg = 
ESAS2R_INIT_MSG_INIT;  		break; @@ -1250,12 +1246,15 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,  		if (rq->req_stat == RS_SUCCESS) {  			u32 major;  			u32 minor; +			u16 fw_release;  			a->fw_version = le16_to_cpu(  				rq->func_rsp.cfg_rsp.vda_version);  			a->fw_build = rq->func_rsp.cfg_rsp.fw_build; -			major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release); -			minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release); +			fw_release = le16_to_cpu( +				rq->func_rsp.cfg_rsp.fw_release); +			major = LOBYTE(fw_release); +			minor = HIBYTE(fw_release);  			a->fw_version += (major << 16) + (minor << 24);  		} else {  			esas2r_hdebug("FAILED"); @@ -1266,9 +1265,8 @@ static bool esas2r_format_init_msg(struct esas2r_adapter *a,  		 * unsupported config requests correctly.  		 */ -		if ((a->flags2 & AF2_THUNDERBOLT) -		    || (be32_to_cpu(a->fw_version) > -			be32_to_cpu(0x47020052))) { +		if ((test_bit(AF2_THUNDERBOLT, &a->flags2)) +		    || (be32_to_cpu(a->fw_version) > 0x00524702)) {  			esas2r_hdebug("CFG get init");  			esas2r_build_cfg_req(a,  					     rq, @@ -1361,10 +1359,10 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)  	struct esas2r_request *rq;  	u32 i; -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		goto exit; -	if (!(a->flags & AF_NVR_VALID)) { +	if (!test_bit(AF_NVR_VALID, &a->flags)) {  		if (!esas2r_nvram_read_direct(a))  			esas2r_log(ESAS2R_LOG_WARN,  				   "invalid/missing NVRAM parameters"); @@ -1376,8 +1374,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)  	}  	/* The firmware is ready. */ -	esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE); -	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); +	clear_bit(AF_DEGRADED_MODE, &a->flags); +	clear_bit(AF_CHPRST_PENDING, &a->flags);  	/* Post all the async event requests */  	for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++) @@ -1398,8 +1396,8 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)  	esas2r_hdebug("firmware revision: %s", a->fw_rev); -	if ((a->flags & AF_CHPRST_DETECTED) -	    && (a->flags & AF_FIRST_INIT)) { +	if (test_bit(AF_CHPRST_DETECTED, &a->flags) +	    && (test_bit(AF_FIRST_INIT, &a->flags))) {  		esas2r_enable_chip_interrupts(a);  		return true;  	} @@ -1423,18 +1421,18 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)  		 * Block Tasklets from getting scheduled and indicate this is  		 * polled discovery.  		 */ -		esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED); -		esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED); +		set_bit(AF_TASKLET_SCHEDULED, &a->flags); +		set_bit(AF_DISC_POLLED, &a->flags);  		/*  		 * Temporarily bring the disable count to zero to enable  		 * deferred processing.  Note that the count is already zero  		 * after the first initialization.  		 */ -		if (a->flags & AF_FIRST_INIT) +		if (test_bit(AF_FIRST_INIT, &a->flags))  			atomic_dec(&a->disable_cnt); -		while (a->flags & AF_DISC_PENDING) { +		while (test_bit(AF_DISC_PENDING, &a->flags)) {  			schedule_timeout_interruptible(msecs_to_jiffies(100));  			/* @@ -1453,7 +1451,7 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)  			 * we have to make sure the timer tick processes the  			 * doorbell indicating the firmware is ready.  			 */ -			if (!(a->flags & AF_CHPRST_PENDING)) +			if (!test_bit(AF_CHPRST_PENDING, &a->flags))  				esas2r_disc_check_for_work(a);  			/* Simulate a timer tick. 
*/ @@ -1473,11 +1471,11 @@ bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)  		} -		if (a->flags & AF_FIRST_INIT) +		if (test_bit(AF_FIRST_INIT, &a->flags))  			atomic_inc(&a->disable_cnt); -		esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED); -		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); +		clear_bit(AF_DISC_POLLED, &a->flags); +		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);  	} @@ -1504,26 +1502,26 @@ exit:  	 * need to get done before we exit.  	 */ -	if ((a->flags & AF_CHPRST_DETECTED) -	    && (a->flags & AF_FIRST_INIT)) { +	if (test_bit(AF_CHPRST_DETECTED, &a->flags) && +	    test_bit(AF_FIRST_INIT, &a->flags)) {  		/*  		 * Reinitialization was performed during the first  		 * initialization.  Only clear the chip reset flag so the  		 * original device polling is not cancelled.  		 */  		if (!rslt) -			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); +			clear_bit(AF_CHPRST_PENDING, &a->flags);  	} else {  		/* First initialization or a subsequent re-init is complete. */  		if (!rslt) { -			esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); -			esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); +			clear_bit(AF_CHPRST_PENDING, &a->flags); +			clear_bit(AF_DISC_PENDING, &a->flags);  		}  		/* Enable deferred processing after the first initialization. */ -		if (a->flags & AF_FIRST_INIT) { -			esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT); +		if (test_bit(AF_FIRST_INIT, &a->flags)) { +			clear_bit(AF_FIRST_INIT, &a->flags);  			if (atomic_dec_return(&a->disable_cnt) == 0)  				esas2r_do_deferred_processes(a); @@ -1535,7 +1533,7 @@ exit:  void esas2r_reset_adapter(struct esas2r_adapter *a)  { -	esas2r_lock_set_flags(&a->flags, AF_OS_RESET); +	set_bit(AF_OS_RESET, &a->flags);  	esas2r_local_reset_adapter(a);  	esas2r_schedule_tasklet(a);  } @@ -1550,17 +1548,17 @@ void esas2r_reset_chip(struct esas2r_adapter *a)  	 * dump is located in the upper 512KB of the onchip SRAM.  Make sure  	 * to not overwrite a previous crash that was saved.  	 */ -	if ((a->flags2 & AF2_COREDUMP_AVAIL) -	    && !(a->flags2 & AF2_COREDUMP_SAVED)) { +	if (test_bit(AF2_COREDUMP_AVAIL, &a->flags2) && +	    !test_bit(AF2_COREDUMP_SAVED, &a->flags2)) {  		esas2r_read_mem_block(a,  				      a->fw_coredump_buff,  				      MW_DATA_ADDR_SRAM + 0x80000,  				      ESAS2R_FWCOREDUMP_SZ); -		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED); +		set_bit(AF2_COREDUMP_SAVED, &a->flags2);  	} -	esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL); +	clear_bit(AF2_COREDUMP_AVAIL, &a->flags2);  	/* Reset the chip */  	if (a->pcid->revision == MVR_FREY_B2) @@ -1606,10 +1604,10 @@ static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)   */  void esas2r_power_down(struct esas2r_adapter *a)  { -	esas2r_lock_set_flags(&a->flags, AF_POWER_MGT); -	esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN); +	set_bit(AF_POWER_MGT, &a->flags); +	set_bit(AF_POWER_DOWN, &a->flags); -	if (!(a->flags & AF_DEGRADED_MODE)) { +	if (!test_bit(AF_DEGRADED_MODE, &a->flags)) {  		u32 starttime;  		u32 doorbell; @@ -1649,14 +1647,14 @@ void esas2r_power_down(struct esas2r_adapter *a)  		 * For versions of firmware that support it tell them the driver  		 * is powering down.  		 */ -		if (a->flags2 & AF2_VDA_POWER_DOWN) +		if (test_bit(AF2_VDA_POWER_DOWN, &a->flags2))  			esas2r_power_down_notify_firmware(a);  	}  	/* Suspend I/O processing. 
*/ -	esas2r_lock_set_flags(&a->flags, AF_OS_RESET); -	esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING); -	esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING); +	set_bit(AF_OS_RESET, &a->flags); +	set_bit(AF_DISC_PENDING, &a->flags); +	set_bit(AF_CHPRST_PENDING, &a->flags);  	esas2r_process_adapter_reset(a); @@ -1673,9 +1671,9 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)  {  	bool ret; -	esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN); +	clear_bit(AF_POWER_DOWN, &a->flags);  	esas2r_init_pci_cfg_space(a); -	esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT); +	set_bit(AF_FIRST_INIT, &a->flags);  	atomic_inc(&a->disable_cnt);  	/* reinitialize the adapter */ @@ -1687,17 +1685,17 @@ bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)  	esas2r_send_reset_ae(a, true);  	/* clear this flag after initialization. */ -	esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT); +	clear_bit(AF_POWER_MGT, &a->flags);  	return ret;  }  bool esas2r_is_adapter_present(struct esas2r_adapter *a)  { -	if (a->flags & AF_NOT_PRESENT) +	if (test_bit(AF_NOT_PRESENT, &a->flags))  		return false;  	if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) { -		esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT); +		set_bit(AF_NOT_PRESENT, &a->flags);  		return false;  	} diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c index c2d4ff57c5c..f16d6bcf9bb 100644 --- a/drivers/scsi/esas2r/esas2r_int.c +++ b/drivers/scsi/esas2r/esas2r_int.c @@ -96,7 +96,7 @@ irqreturn_t esas2r_interrupt(int irq, void *dev_id)  	if (!esas2r_adapter_interrupt_pending(a))  		return IRQ_NONE; -	esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING); +	set_bit(AF2_INT_PENDING, &a->flags2);  	esas2r_schedule_tasklet(a);  	return IRQ_HANDLED; @@ -317,9 +317,10 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)  	 *  = 2 - can start any request  	 */ -	if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING)) +	if (test_bit(AF_CHPRST_PENDING, &a->flags) || +	    test_bit(AF_FLASHING, &a->flags))  		startreqs = 0; -	else if (a->flags & AF_DISC_PENDING) +	else if (test_bit(AF_DISC_PENDING, &a->flags))  		startreqs = 1;  	atomic_inc(&a->disable_cnt); @@ -367,7 +368,7 @@ void esas2r_do_deferred_processes(struct esas2r_adapter *a)  				 * Flashing could have been set by last local  				 * start  				 */ -				if (a->flags & AF_FLASHING) +				if (test_bit(AF_FLASHING, &a->flags))  					break;  			}  		} @@ -404,7 +405,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)  		dc->disc_evt = 0; -		esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG); +		clear_bit(AF_DISC_IN_PROG, &a->flags);  	}  	/* @@ -425,7 +426,7 @@ void esas2r_process_adapter_reset(struct esas2r_adapter *a)  		a->last_write =  			a->last_read = a->list_size - 1; -	esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); +	set_bit(AF_COMM_LIST_TOGGLE, &a->flags);  	/* Kill all the requests on the active list */  	list_for_each(element, &a->defer_list) { @@ -470,7 +471,7 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)  	if (atomic_read(&a->disable_cnt) == 0)  		esas2r_do_deferred_processes(a); -	esas2r_lock_clear_flags(&a->flags, AF_OS_RESET); +	clear_bit(AF_OS_RESET, &a->flags);  	esas2r_trace_exit();  } @@ -478,10 +479,10 @@ static void esas2r_process_bus_reset(struct esas2r_adapter *a)  static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)  { -	esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED); -	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); -	
esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); -	esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); +	clear_bit(AF_CHPRST_NEEDED, &a->flags); +	clear_bit(AF_BUSRST_NEEDED, &a->flags); +	clear_bit(AF_BUSRST_DETECTED, &a->flags); +	clear_bit(AF_BUSRST_PENDING, &a->flags);  	/*  	 * Make sure we don't get attempt more than 3 resets  	 * when the uptime between resets does not exceed one @@ -507,10 +508,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)  		 * prevent the heartbeat from trying to recover.  		 */ -		esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE); -		esas2r_lock_set_flags(&a->flags, AF_DISABLED); -		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING); -		esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING); +		set_bit(AF_DEGRADED_MODE, &a->flags); +		set_bit(AF_DISABLED, &a->flags); +		clear_bit(AF_CHPRST_PENDING, &a->flags); +		clear_bit(AF_DISC_PENDING, &a->flags);  		esas2r_disable_chip_interrupts(a);  		a->int_mask = 0; @@ -519,18 +520,17 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)  		esas2r_log(ESAS2R_LOG_CRIT,  			   "Adapter disabled because of hardware failure");  	} else { -		u32 flags = -			esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED); +		bool alrdyrst = test_and_set_bit(AF_CHPRST_STARTED, &a->flags); -		if (!(flags & AF_CHPRST_STARTED)) +		if (!alrdyrst)  			/*  			 * Only disable interrupts if this is  			 * the first reset attempt.  			 */  			esas2r_disable_chip_interrupts(a); -		if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) && -		    !(flags & AF_CHPRST_STARTED)) { +		if ((test_bit(AF_POWER_MGT, &a->flags)) && +		    !test_bit(AF_FIRST_INIT, &a->flags) && !alrdyrst) {  			/*  			 * Don't reset the chip on the first  			 * deferred power up attempt. @@ -543,10 +543,10 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)  		/* Kick off the reinitialization */  		a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;  		a->chip_init_time = jiffies_to_msecs(jiffies); -		if (!(a->flags & AF_POWER_MGT)) { +		if (!test_bit(AF_POWER_MGT, &a->flags)) {  			esas2r_process_adapter_reset(a); -			if (!(flags & AF_CHPRST_STARTED)) { +			if (!alrdyrst) {  				/* Remove devices now that I/O is cleaned up. */  				a->prev_dev_cnt =  					esas2r_targ_db_get_tgt_cnt(a); @@ -560,38 +560,37 @@ static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)  static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)  { -	while (a->flags & AF_CHPRST_DETECTED) { +	while (test_bit(AF_CHPRST_DETECTED, &a->flags)) {  		/*  		 * Balance the enable in esas2r_initadapter_hw.  		 * Esas2r_power_down already took care of it for power  		 * management.  		 */ -		if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags & -							AF_POWER_MGT)) +		if (!test_bit(AF_DEGRADED_MODE, &a->flags) && +		    !test_bit(AF_POWER_MGT, &a->flags))  			esas2r_disable_chip_interrupts(a);  		/* Reinitialize the chip. */  		esas2r_check_adapter(a);  		esas2r_init_adapter_hw(a, 0); -		if (a->flags & AF_CHPRST_NEEDED) +		if (test_bit(AF_CHPRST_NEEDED, &a->flags))  			break; -		if (a->flags & AF_POWER_MGT) { +		if (test_bit(AF_POWER_MGT, &a->flags)) {  			/* Recovery from power management. */ -			if (a->flags & AF_FIRST_INIT) { +			if (test_bit(AF_FIRST_INIT, &a->flags)) {  				/* Chip reset during normal power up */  				esas2r_log(ESAS2R_LOG_CRIT,  					   "The firmware was reset during a normal power-up sequence");  			} else {  				/* Deferred power up complete. 
*/ -				esas2r_lock_clear_flags(&a->flags, -							AF_POWER_MGT); +				clear_bit(AF_POWER_MGT, &a->flags);  				esas2r_send_reset_ae(a, true);  			}  		} else {  			/* Recovery from online chip reset. */ -			if (a->flags & AF_FIRST_INIT) { +			if (test_bit(AF_FIRST_INIT, &a->flags)) {  				/* Chip reset during driver load */  			} else {  				/* Chip reset after driver load */ @@ -602,14 +601,14 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)  				   "Recovering from a chip reset while the chip was online");  		} -		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED); +		clear_bit(AF_CHPRST_STARTED, &a->flags);  		esas2r_enable_chip_interrupts(a);  		/*  		 * Clear this flag last!  this indicates that the chip has been  		 * reset already during initialization.  		 */ -		esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED); +		clear_bit(AF_CHPRST_DETECTED, &a->flags);  	}  } @@ -617,26 +616,28 @@ static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)  /* Perform deferred tasks when chip interrupts are disabled */  void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)  { -	if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) { -		if (a->flags & AF_CHPRST_NEEDED) + +	if (test_bit(AF_CHPRST_NEEDED, &a->flags) || +	    test_bit(AF_CHPRST_DETECTED, &a->flags)) { +		if (test_bit(AF_CHPRST_NEEDED, &a->flags))  			esas2r_chip_rst_needed_during_tasklet(a);  		esas2r_handle_chip_rst_during_tasklet(a);  	} -	if (a->flags & AF_BUSRST_NEEDED) { +	if (test_bit(AF_BUSRST_NEEDED, &a->flags)) {  		esas2r_hdebug("hard resetting bus"); -		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED); +		clear_bit(AF_BUSRST_NEEDED, &a->flags); -		if (a->flags & AF_FLASHING) -			esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); +		if (test_bit(AF_FLASHING, &a->flags)) +			set_bit(AF_BUSRST_DETECTED, &a->flags);  		else  			esas2r_write_register_dword(a, MU_DOORBELL_IN,  						    DRBL_RESET_BUS);  	} -	if (a->flags & AF_BUSRST_DETECTED) { +	if (test_bit(AF_BUSRST_DETECTED, &a->flags)) {  		esas2r_process_bus_reset(a);  		esas2r_log_dev(ESAS2R_LOG_WARN, @@ -645,14 +646,14 @@ void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)  		scsi_report_bus_reset(a->host, 0); -		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED); -		esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING); +		clear_bit(AF_BUSRST_DETECTED, &a->flags); +		clear_bit(AF_BUSRST_PENDING, &a->flags);  		esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");  	} -	if (a->flags & AF_PORT_CHANGE) { -		esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE); +	if (test_bit(AF_PORT_CHANGE, &a->flags)) { +		clear_bit(AF_PORT_CHANGE, &a->flags);  		esas2r_targ_db_report_changes(a);  	} @@ -672,10 +673,10 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)  	esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);  	if (doorbell & DRBL_RESET_BUS) -		esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED); +		set_bit(AF_BUSRST_DETECTED, &a->flags);  	if (doorbell & DRBL_FORCE_INT) -		esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT); +		clear_bit(AF_HEARTBEAT, &a->flags);  	if (doorbell & DRBL_PANIC_REASON_MASK) {  		esas2r_hdebug("*** Firmware Panic ***"); @@ -683,7 +684,7 @@ static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)  	}  	if (doorbell & DRBL_FW_RESET) { -		esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL); +		set_bit(AF2_COREDUMP_AVAIL, &a->flags2);  		esas2r_local_reset_adapter(a);  	} @@ -918,7 +919,7 @@ void 
esas2r_complete_request(struct esas2r_adapter *a,  {  	if (rq->vrq->scsi.function == VDA_FUNC_FLASH  	    && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT) -		esas2r_lock_clear_flags(&a->flags, AF_FLASHING); +		clear_bit(AF_FLASHING, &a->flags);  	/* See if we setup a callback to do special processing */ diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c index 324e2626a08..a8df916cd57 100644 --- a/drivers/scsi/esas2r/esas2r_io.c +++ b/drivers/scsi/esas2r/esas2r_io.c @@ -49,7 +49,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)  	struct esas2r_request *startrq = rq;  	unsigned long flags; -	if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) { +	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags) || +		     test_bit(AF_POWER_DOWN, &a->flags))) {  		if (rq->vrq->scsi.function == VDA_FUNC_SCSI)  			rq->req_stat = RS_SEL2;  		else @@ -69,8 +70,8 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)  			 * Note that if AF_DISC_PENDING is set than this will  			 * go on the defer queue.  			 */ -			if (unlikely(t->target_state != TS_PRESENT -				     && !(a->flags & AF_DISC_PENDING))) +			if (unlikely(t->target_state != TS_PRESENT && +				     !test_bit(AF_DISC_PENDING, &a->flags)))  				rq->req_stat = RS_SEL;  		}  	} @@ -91,8 +92,9 @@ void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)  	spin_lock_irqsave(&a->queue_lock, flags);  	if (likely(list_empty(&a->defer_list) && -		   !(a->flags & -		     (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING)))) +		   !test_bit(AF_CHPRST_PENDING, &a->flags) && +		   !test_bit(AF_FLASHING, &a->flags) && +		   !test_bit(AF_DISC_PENDING, &a->flags)))  		esas2r_local_start_request(a, startrq);  	else  		list_add_tail(&startrq->req_list, &a->defer_list); @@ -124,7 +126,7 @@ void esas2r_local_start_request(struct esas2r_adapter *a,  	if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH  		     && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)) -		esas2r_lock_set_flags(&a->flags, AF_FLASHING); +		set_bit(AF_FLASHING, &a->flags);  	list_add_tail(&rq->req_list, &a->active_list);  	esas2r_start_vda_request(a, rq); @@ -147,11 +149,10 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,  	if (a->last_write >= a->list_size) {  		a->last_write = 0;  		/* update the toggle bit */ -		if (a->flags & AF_COMM_LIST_TOGGLE) -			esas2r_lock_clear_flags(&a->flags, -						AF_COMM_LIST_TOGGLE); +		if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags)) +			clear_bit(AF_COMM_LIST_TOGGLE, &a->flags);  		else -			esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE); +			set_bit(AF_COMM_LIST_TOGGLE, &a->flags);  	}  	element = @@ -169,7 +170,7 @@ void esas2r_start_vda_request(struct esas2r_adapter *a,  	/* Update the write pointer */  	dw = a->last_write; -	if (a->flags & AF_COMM_LIST_TOGGLE) +	if (test_bit(AF_COMM_LIST_TOGGLE, &a->flags))  		dw |= MU_ILW_TOGGLE;  	esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle); @@ -687,18 +688,14 @@ static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)  			esas2r_write_register_dword(a, MU_DOORBELL_OUT,  						    doorbell);  			if (ver == DRBL_FW_VER_0) { -				esas2r_lock_set_flags(&a->flags, -						      AF_CHPRST_DETECTED); -				esas2r_lock_set_flags(&a->flags, -						      AF_LEGACY_SGE_MODE); +				set_bit(AF_CHPRST_DETECTED, &a->flags); +				set_bit(AF_LEGACY_SGE_MODE, &a->flags);  				a->max_vdareq_size = 128;  				a->build_sgl = esas2r_build_sg_list_sge;  			} else if (ver == 
DRBL_FW_VER_1) { -				esas2r_lock_set_flags(&a->flags, -						      AF_CHPRST_DETECTED); -				esas2r_lock_clear_flags(&a->flags, -							AF_LEGACY_SGE_MODE); +				set_bit(AF_CHPRST_DETECTED, &a->flags); +				clear_bit(AF_LEGACY_SGE_MODE, &a->flags);  				a->max_vdareq_size = 1024;  				a->build_sgl = esas2r_build_sg_list_prd; @@ -719,28 +716,27 @@ void esas2r_timer_tick(struct esas2r_adapter *a)  	a->last_tick_time = currtime;  	/* count down the uptime */ -	if (a->chip_uptime -	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { +	if (a->chip_uptime && +	    !test_bit(AF_CHPRST_PENDING, &a->flags) && +	    !test_bit(AF_DISC_PENDING, &a->flags)) {  		if (deltatime >= a->chip_uptime)  			a->chip_uptime = 0;  		else  			a->chip_uptime -= deltatime;  	} -	if (a->flags & AF_CHPRST_PENDING) { -		if (!(a->flags & AF_CHPRST_NEEDED) -		    && !(a->flags & AF_CHPRST_DETECTED)) +	if (test_bit(AF_CHPRST_PENDING, &a->flags)) { +		if (!test_bit(AF_CHPRST_NEEDED, &a->flags) && +		    !test_bit(AF_CHPRST_DETECTED, &a->flags))  			esas2r_handle_pending_reset(a, currtime);  	} else { -		if (a->flags & AF_DISC_PENDING) +		if (test_bit(AF_DISC_PENDING, &a->flags))  			esas2r_disc_check_complete(a); - -		if (a->flags & AF_HEARTBEAT_ENB) { -			if (a->flags & AF_HEARTBEAT) { +		if (test_bit(AF_HEARTBEAT_ENB, &a->flags)) { +			if (test_bit(AF_HEARTBEAT, &a->flags)) {  				if ((currtime - a->heartbeat_time) >=  				    ESAS2R_HEARTBEAT_TIME) { -					esas2r_lock_clear_flags(&a->flags, -								AF_HEARTBEAT); +					clear_bit(AF_HEARTBEAT, &a->flags);  					esas2r_hdebug("heartbeat failed");  					esas2r_log(ESAS2R_LOG_CRIT,  						   "heartbeat failed"); @@ -748,7 +744,7 @@ void esas2r_timer_tick(struct esas2r_adapter *a)  					esas2r_local_reset_adapter(a);  				}  			} else { -				esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT); +				set_bit(AF_HEARTBEAT, &a->flags);  				a->heartbeat_time = currtime;  				esas2r_force_interrupt(a);  			} @@ -812,7 +808,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,  	rqaux->vrq->scsi.flags |=  		cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK)); -	if (a->flags & AF_FLASHING) { +	if (test_bit(AF_FLASHING, &a->flags)) {  		/* Assume success.  
if there are active requests, return busy */  		rqaux->req_stat = RS_SUCCESS; @@ -831,7 +827,7 @@ bool esas2r_send_task_mgmt(struct esas2r_adapter *a,  	spin_unlock_irqrestore(&a->queue_lock, flags); -	if (!(a->flags & AF_FLASHING)) +	if (!test_bit(AF_FLASHING, &a->flags))  		esas2r_start_request(a, rqaux);  	esas2r_comp_list_drain(a, &comp_list); @@ -848,11 +844,12 @@ void esas2r_reset_bus(struct esas2r_adapter *a)  {  	esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset"); -	if (!(a->flags & AF_DEGRADED_MODE) -	    && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) { -		esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED); -		esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING); -		esas2r_lock_set_flags(&a->flags, AF_OS_RESET); +	if (!test_bit(AF_DEGRADED_MODE, &a->flags) && +	    !test_bit(AF_CHPRST_PENDING, &a->flags) && +	    !test_bit(AF_DISC_PENDING, &a->flags)) { +		set_bit(AF_BUSRST_NEEDED, &a->flags); +		set_bit(AF_BUSRST_PENDING, &a->flags); +		set_bit(AF_OS_RESET, &a->flags);  		esas2r_schedule_tasklet(a);  	} diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index e5b09027e06..d89a0277a8e 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -347,7 +347,7 @@ static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,  {  	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl; -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		return false;  	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge); @@ -463,7 +463,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,  		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));  		gcc->bios_build_rev = LOWORD(a->flash_ver); -		if (a->flags2 & AF2_THUNDERLINK) +		if (test_bit(AF2_THUNDERLINK, &a->flags2))  			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA  					   | CSMI_CNTLRF_SATA_HBA;  		else @@ -485,7 +485,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a,  	{  		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts; -		if (a->flags & AF_DEGRADED_MODE) +		if (test_bit(AF_DEGRADED_MODE, &a->flags))  			gcs->status = CSMI_CNTLR_STS_FAILED;  		else  			gcs->status = CSMI_CNTLR_STS_GOOD; @@ -819,10 +819,10 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,  		gai->adap_type = ATTO_GAI_AT_ESASRAID2; -		if (a->flags2 & AF2_THUNDERLINK) +		if (test_bit(AF2_THUNDERLINK, &a->flags2))  			gai->adap_type = ATTO_GAI_AT_TLSASHBA; -		if (a->flags & AF_DEGRADED_MODE) +		if (test_bit(AF_DEGRADED_MODE, &a->flags))  			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;  		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP | @@ -938,7 +938,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,  				u32 total_len = ESAS2R_FWCOREDUMP_SZ;  				/* Size is zero if a core dump isn't present */ -				if (!(a->flags2 & AF2_COREDUMP_SAVED)) +				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))  					total_len = 0;  				if (len > total_len) @@ -960,8 +960,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,  				memset(a->fw_coredump_buff, 0,  				       ESAS2R_FWCOREDUMP_SZ); -				esas2r_lock_clear_flags(&a->flags2, -							AF2_COREDUMP_SAVED); +				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);  			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {  				hi->status = ATTO_STS_UNSUPPORTED;  				break; @@ -973,7 +972,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,  			trc->total_length = ESAS2R_FWCOREDUMP_SZ;  			/* Return zero length buffer if core dump not present */ -			if (!(a->flags2 & AF2_COREDUMP_SAVED)) +			if 
(!test_bit(AF2_COREDUMP_SAVED, &a->flags2))  				trc->total_length = 0;  		} else {  			hi->status = ATTO_STS_UNSUPPORTED; @@ -1048,6 +1047,7 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,  		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)  			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q); +  		if (!esas2r_build_sg_list(a, rq, sgc)) {  			hi->status = ATTO_STS_OUT_OF_RSRC;  			break; @@ -1139,15 +1139,15 @@ static int hba_ioctl_callback(struct esas2r_adapter *a,  			break;  		} -		if (a->flags & AF_CHPRST_NEEDED) +		if (test_bit(AF_CHPRST_NEEDED, &a->flags))  			ac->adap_state = ATTO_AC_AS_RST_SCHED; -		else if (a->flags & AF_CHPRST_PENDING) +		else if (test_bit(AF_CHPRST_PENDING, &a->flags))  			ac->adap_state = ATTO_AC_AS_RST_IN_PROG; -		else if (a->flags & AF_DISC_PENDING) +		else if (test_bit(AF_DISC_PENDING, &a->flags))  			ac->adap_state = ATTO_AC_AS_RST_DISC; -		else if (a->flags & AF_DISABLED) +		else if (test_bit(AF_DISABLED, &a->flags))  			ac->adap_state = ATTO_AC_AS_DISABLED; -		else if (a->flags & AF_DEGRADED_MODE) +		else if (test_bit(AF_DEGRADED_MODE, &a->flags))  			ac->adap_state = ATTO_AC_AS_DEGRADED;  		else  			ac->adap_state = ATTO_AC_AS_OK; diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c index 9bf285df58d..a82030aa857 100644 --- a/drivers/scsi/esas2r/esas2r_log.c +++ b/drivers/scsi/esas2r/esas2r_log.c @@ -165,13 +165,9 @@ static int esas2r_log_master(const long level,  		/*  		 * Put a line break at the end of the formatted string so that -		 * we don't wind up with run-on messages.  only append if there -		 * is enough space in the buffer. +		 * we don't wind up with run-on messages.  		 */ -		if (strlen(event_buffer) < buflen) -			strcat(buffer, "\n"); - -		printk(event_buffer); +		printk("%s\n", event_buffer);  		spin_unlock_irqrestore(&event_buffer_lock, flags);  	} diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index 4abf1272e1e..6504a195c87 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -390,7 +390,7 @@ static int esas2r_probe(struct pci_dev *pcid,  	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),  		       "pci_enable_device() OK");  	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev), -		       "after pci_device_enable() enable_cnt: %d", +		       "after pci_enable_device() enable_cnt: %d",  		       pcid->enable_cnt.counter);  	host = scsi_host_alloc(&driver_template, host_alloc_size); @@ -889,7 +889,7 @@ int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)  	/* Assume success, if it fails we will fix the result later. */  	cmd->result = DID_OK << 16; -	if (unlikely(a->flags & AF_DEGRADED_MODE)) { +	if (unlikely(test_bit(AF_DEGRADED_MODE, &a->flags))) {  		cmd->result = DID_NO_CONNECT << 16;  		cmd->scsi_done(cmd);  		return 0; @@ -1050,7 +1050,7 @@ int esas2r_eh_abort(struct scsi_cmnd *cmd)  	esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd); -	if (a->flags & AF_DEGRADED_MODE) { +	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {  		cmd->result = DID_ABORT << 16;  		scsi_set_resid(cmd, 0); @@ -1131,7 +1131,7 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)  	struct esas2r_adapter *a =  		(struct esas2r_adapter *)cmd->device->host->hostdata; -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		return FAILED;  	if (host_reset) @@ -1141,14 +1141,14 @@ static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)  	/* above call sets the AF_OS_RESET flag.  
wait for it to clear. */ -	while (a->flags & AF_OS_RESET) { +	while (test_bit(AF_OS_RESET, &a->flags)) {  		msleep(10); -		if (a->flags & AF_DEGRADED_MODE) +		if (test_bit(AF_DEGRADED_MODE, &a->flags))  			return FAILED;  	} -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		return FAILED;  	return SUCCESS; @@ -1176,7 +1176,7 @@ static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)  	u8 task_management_status = RS_PENDING;  	bool completed; -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		return FAILED;  retry: @@ -1229,7 +1229,7 @@ retry:  			msleep(10);  	} -	if (a->flags & AF_DEGRADED_MODE) +	if (test_bit(AF_DEGRADED_MODE, &a->flags))  		return FAILED;  	if (task_management_status == RS_BUSY) { @@ -1666,13 +1666,13 @@ void esas2r_adapter_tasklet(unsigned long context)  {  	struct esas2r_adapter *a = (struct esas2r_adapter *)context; -	if (unlikely(a->flags2 & AF2_TIMER_TICK)) { -		esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK); +	if (unlikely(test_bit(AF2_TIMER_TICK, &a->flags2))) { +		clear_bit(AF2_TIMER_TICK, &a->flags2);  		esas2r_timer_tick(a);  	} -	if (likely(a->flags2 & AF2_INT_PENDING)) { -		esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING); +	if (likely(test_bit(AF2_INT_PENDING, &a->flags2))) { +		clear_bit(AF2_INT_PENDING, &a->flags2);  		esas2r_adapter_interrupt(a);  	} @@ -1680,12 +1680,12 @@ void esas2r_adapter_tasklet(unsigned long context)  		esas2r_do_tasklet_tasks(a);  	if (esas2r_is_tasklet_pending(a) -	    || (a->flags2 & AF2_INT_PENDING) -	    || (a->flags2 & AF2_TIMER_TICK)) { -		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); +	    || (test_bit(AF2_INT_PENDING, &a->flags2)) +	    || (test_bit(AF2_TIMER_TICK, &a->flags2))) { +		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);  		esas2r_schedule_tasklet(a);  	} else { -		esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED); +		clear_bit(AF_TASKLET_SCHEDULED, &a->flags);  	}  } @@ -1707,7 +1707,7 @@ static void esas2r_timer_callback(unsigned long context)  {  	struct esas2r_adapter *a = (struct esas2r_adapter *)context; -	esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK); +	set_bit(AF2_TIMER_TICK, &a->flags2);  	esas2r_schedule_tasklet(a); diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c index e540a2fa3d1..bf45beaad43 100644 --- a/drivers/scsi/esas2r/esas2r_targdb.c +++ b/drivers/scsi/esas2r/esas2r_targdb.c @@ -86,7 +86,7 @@ void esas2r_targ_db_report_changes(struct esas2r_adapter *a)  	esas2r_trace_enter(); -	if (a->flags & AF_DISC_PENDING) { +	if (test_bit(AF_DISC_PENDING, &a->flags)) {  		esas2r_trace_exit();  		return;  	} diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c index fd139287964..30028e56df6 100644 --- a/drivers/scsi/esas2r/esas2r_vda.c +++ b/drivers/scsi/esas2r/esas2r_vda.c @@ -84,7 +84,7 @@ bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,  		return false;  	} -	if (a->flags & AF_DEGRADED_MODE) { +	if (test_bit(AF_DEGRADED_MODE, &a->flags)) {  		vi->status = ATTO_STS_DEGRADED;  		return false;  	} @@ -310,9 +310,9 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,  				le32_to_cpu(rsp->vda_version);  			cfg->data.init.fw_build = rsp->fw_build; -			snprintf(buf, sizeof(buf), "%1d.%02d", -				(int)LOBYTE(le16_to_cpu(rsp->fw_release)), -				(int)HIBYTE(le16_to_cpu(rsp->fw_release))); +			snprintf(buf, sizeof(buf), "%1.1u.%2.2u", +				 (int)LOBYTE(le16_to_cpu(rsp->fw_release)), +				 
(int)HIBYTE(le16_to_cpu(rsp->fw_release)));  			memcpy(&cfg->data.init.fw_release, buf,  			       sizeof(cfg->data.init.fw_release)); @@ -389,7 +389,7 @@ void esas2r_build_mgt_req(struct esas2r_adapter *a,  	vrq->length = cpu_to_le32(length);  	if (vrq->length) { -		if (a->flags & AF_LEGACY_SGE_MODE) { +		if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {  			vrq->sg_list_offset = (u8)offsetof(  				struct atto_vda_mgmt_req, sge); @@ -427,7 +427,7 @@ void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)  	vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data)); -	if (a->flags & AF_LEGACY_SGE_MODE) { +	if (test_bit(AF_LEGACY_SGE_MODE, &a->flags)) {  		vrq->sg_list_offset =  			(u8)offsetof(struct atto_vda_ae_req, sge);  		vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 07453bbf05e..00ee0ed642a 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -408,6 +408,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,  	}  	ctlr = fcoe_ctlr_device_priv(ctlr_dev); +	ctlr->cdev = ctlr_dev;  	fcoe = fcoe_ctlr_priv(ctlr);  	dev_hold(netdev); @@ -1440,22 +1441,28 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,  	ctlr = fcoe_to_ctlr(fcoe);  	lport = ctlr->lp;  	if (unlikely(!lport)) { -		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); +		FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n");  		goto err2;  	}  	if (!lport->link_up)  		goto err2; -	FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p " -			"data:%p tail:%p end:%p sum:%d dev:%s", +	FCOE_NETDEV_DBG(netdev, +			"skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",  			skb->len, skb->data_len, skb->head, skb->data,  			skb_tail_pointer(skb), skb_end_pointer(skb),  			skb->csum, skb->dev ? 
skb->dev->name : "<NULL>"); + +	skb = skb_share_check(skb, GFP_ATOMIC); + +	if (skb == NULL) +		return NET_RX_DROP; +  	eh = eth_hdr(skb);  	if (is_fip_mode(ctlr) && -	    compare_ether_addr(eh->h_source, ctlr->dest_addr)) { +	    !ether_addr_equal(eh->h_source, ctlr->dest_addr)) {  		FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",  				eh->h_source);  		goto err; @@ -1540,13 +1547,13 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,  		wake_up_process(fps->thread);  	spin_unlock(&fps->fcoe_rx_list.lock); -	return 0; +	return NET_RX_SUCCESS;  err:  	per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;  	put_cpu();  err2:  	kfree_skb(skb); -	return -1; +	return NET_RX_DROP;  }  /** @@ -1788,13 +1795,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)  	lport = fr->fr_dev;  	if (unlikely(!lport)) {  		if (skb->destructor != fcoe_percpu_flush_done) -			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb"); +			FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");  		kfree_skb(skb);  		return;  	} -	FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d " -			"head:%p data:%p tail:%p end:%p sum:%d dev:%s", +	FCOE_NETDEV_DBG(skb->dev, +			"skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",  			skb->len, skb->data_len,  			skb->head, skb->data, skb_tail_pointer(skb),  			skb_end_pointer(skb), skb->csum, @@ -1865,7 +1872,7 @@ static int fcoe_percpu_receive_thread(void *arg)  	skb_queue_head_init(&tmp); -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  retry:  	while (!kthread_should_stop()) { @@ -2626,14 +2633,18 @@ static int __init fcoe_init(void)  		skb_queue_head_init(&p->fcoe_rx_list);  	} +	cpu_notifier_register_begin(); +  	for_each_online_cpu(cpu)  		fcoe_percpu_thread_create(cpu);  	/* Initialize per CPU interrupt thread */ -	rc = register_hotcpu_notifier(&fcoe_cpu_notifier); +	rc = __register_hotcpu_notifier(&fcoe_cpu_notifier);  	if (rc)  		goto out_free; +	cpu_notifier_register_done(); +  	/* Setup link change notification */  	fcoe_dev_setup(); @@ -2648,6 +2659,9 @@ out_free:  	for_each_online_cpu(cpu) {  		fcoe_percpu_thread_destroy(cpu);  	} + +	cpu_notifier_register_done(); +  	mutex_unlock(&fcoe_config_mutex);  	destroy_workqueue(fcoe_wq);  	return rc; @@ -2680,11 +2694,15 @@ static void __exit fcoe_exit(void)  	}  	rtnl_unlock(); -	unregister_hotcpu_notifier(&fcoe_cpu_notifier); +	cpu_notifier_register_begin();  	for_each_online_cpu(cpu)  		fcoe_percpu_thread_destroy(cpu); +	__unregister_hotcpu_notifier(&fcoe_cpu_notifier); + +	cpu_notifier_register_done(); +  	mutex_unlock(&fcoe_config_mutex);  	/* diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 203415e0251..34a1b1f333b 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -160,74 +160,113 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)  }  EXPORT_SYMBOL(fcoe_ctlr_init); +/** + * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device} + * @new: The newly discovered FCF + * + * Called with fip->ctlr_mutex held + */  static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)  {  	struct fcoe_ctlr *fip = new->fip; -	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip); -	struct fcoe_fcf_device temp, *fcf_dev; -	int rc = 0; +	struct fcoe_ctlr_device *ctlr_dev; +	struct fcoe_fcf_device *temp, *fcf_dev; +	int rc = -ENOMEM;  	LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",  			new->fabric_name, new->fcf_mac); -	mutex_lock(&ctlr_dev->lock); - -	temp.fabric_name 
= new->fabric_name;
-	temp.switch_name = new->switch_name;
-	temp.fc_map = new->fc_map;
-	temp.vfid = new->vfid;
-	memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
-	temp.priority = new->pri;
-	temp.fka_period = new->fka_period;
-	temp.selected = 0; /* default to unselected */
-
-	fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
-	if (unlikely(!fcf_dev)) {
-		rc = -ENOMEM;
+	temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+	if (!temp)
 		goto out;
-	}
+
+	temp->fabric_name = new->fabric_name;
+	temp->switch_name = new->switch_name;
+	temp->fc_map = new->fc_map;
+	temp->vfid = new->vfid;
+	memcpy(temp->mac, new->fcf_mac, ETH_ALEN);
+	temp->priority = new->pri;
+	temp->fka_period = new->fka_period;
+	temp->selected = 0; /* default to unselected */
 
 	/*
-	 * The fcoe_sysfs layer can return a CONNECTED fcf that
-	 * has a priv (fcf was never deleted) or a CONNECTED fcf
-	 * that doesn't have a priv (fcf was deleted). However,
-	 * libfcoe will always delete FCFs before trying to add
-	 * them. This is ensured because both recv_adv and
-	 * age_fcfs are protected by the the fcoe_ctlr's mutex.
-	 * This means that we should never get a FCF with a
-	 * non-NULL priv pointer.
+	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
+	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device.
+	 * fnic would be an example of a driver with this behavior. In this
+	 * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
+	 * don't want to make sysfs changes.
 	 */
-	BUG_ON(fcf_dev->priv);
-	fcf_dev->priv = new;
-	new->fcf_dev = fcf_dev;
+	ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	if (ctlr_dev) {
+		mutex_lock(&ctlr_dev->lock);
+		fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
+		if (unlikely(!fcf_dev)) {
+			rc = -ENOMEM;
+			mutex_unlock(&ctlr_dev->lock);
+			goto out;
+		}
+
+		/*
+		 * The fcoe_sysfs layer can return a CONNECTED fcf that
+		 * has a priv (fcf was never deleted) or a CONNECTED fcf
+		 * that doesn't have a priv (fcf was deleted). However,
+		 * libfcoe will always delete FCFs before trying to add
+		 * them. This is ensured because both recv_adv and
+		 * age_fcfs are protected by the fcoe_ctlr's mutex.
+		 * This means that we should never get a FCF with a
+		 * non-NULL priv pointer.
+		 */
+		BUG_ON(fcf_dev->priv);
+
+		fcf_dev->priv = new;
+		new->fcf_dev = fcf_dev;
+		mutex_unlock(&ctlr_dev->lock);
+	}
 
 	list_add(&new->list, &fip->fcfs);
 	fip->fcf_count++;
+	rc = 0;
 
 out:
-	mutex_unlock(&ctlr_dev->lock);
+	kfree(temp);
 	return rc;
 }
 
+/**
+ * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} from a fcoe_ctlr{,_device}
+ * @new: The FCF to be removed
+ *
+ * Called with fip->ctlr_mutex held
+ */
 static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
 {
 	struct fcoe_ctlr *fip = new->fip;
-	struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+	struct fcoe_ctlr_device *cdev;
 	struct fcoe_fcf_device *fcf_dev;
 
 	list_del(&new->list);
 	fip->fcf_count--;
 
-	mutex_lock(&ctlr_dev->lock);
-
-	fcf_dev = fcoe_fcf_to_fcf_dev(new);
-	WARN_ON(!fcf_dev);
-	new->fcf_dev = NULL;
-	fcoe_fcf_device_delete(fcf_dev);
-	kfree(new);
-
-	mutex_unlock(&ctlr_dev->lock);
+	/*
+	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
+	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device
+	 * or a fcoe_fcf_device.
+	 *
+	 * fnic would be an example of a driver with this behavior. In this
+	 * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
+	 * but we don't want to make sysfs changes.
+	 */ +	cdev = fcoe_ctlr_to_ctlr_dev(fip); +	if (cdev) { +		mutex_lock(&cdev->lock); +		fcf_dev = fcoe_fcf_to_fcf_dev(new); +		WARN_ON(!fcf_dev); +		new->fcf_dev = NULL; +		fcoe_fcf_device_delete(fcf_dev); +		kfree(new); +		mutex_unlock(&cdev->lock); +	}  }  /** @@ -300,7 +339,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)  	spin_unlock_bh(&fip->ctlr_lock);  	sel = fip->sel_fcf; -	if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr)) +	if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))  		goto unlock;  	if (!is_zero_ether_addr(fip->dest_addr)) {  		printk(KERN_NOTICE "libfcoe: host%d: " @@ -1000,7 +1039,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)  		if (fcf->switch_name == new.switch_name &&  		    fcf->fabric_name == new.fabric_name &&  		    fcf->fc_map == new.fc_map && -		    compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { +		    ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) {  			found = 1;  			break;  		} @@ -1340,7 +1379,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,  			mp = (struct fip_mac_desc *)desc;  			if (dlen < sizeof(*mp))  				goto err; -			if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac)) +			if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac))  				goto err;  			desc_mask &= ~BIT(FIP_DT_MAC);  			break; @@ -1418,8 +1457,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,  			 * 'port_id' is already validated, check MAC address and  			 * wwpn  			 */ -			if (compare_ether_addr(fip->get_src_addr(vn_port), -						vp->fd_mac) != 0 || +			if (!ether_addr_equal(fip->get_src_addr(vn_port), +					      vp->fd_mac) ||  				get_unaligned_be64(&vp->fd_wwpn) !=  							vn_port->wwpn)  				continue; @@ -1453,6 +1492,9 @@ err:   */  void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)  { +	skb = skb_share_check(skb, GFP_ATOMIC); +	if (!skb) +		return;  	skb_queue_tail(&fip->fip_recv_list, skb);  	schedule_work(&fip->recv_work);  } @@ -1479,12 +1521,12 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)  		goto drop;  	eh = eth_hdr(skb);  	if (fip->mode == FIP_MODE_VN2VN) { -		if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && -		    compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) && -		    compare_ether_addr(eh->h_dest, fcoe_all_p2p)) +		if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && +		    !ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) && +		    !ether_addr_equal(eh->h_dest, fcoe_all_p2p))  			goto drop; -	} else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) && -		   compare_ether_addr(eh->h_dest, fcoe_all_enode)) +	} else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) && +		   !ether_addr_equal(eh->h_dest, fcoe_all_enode))  		goto drop;  	fiph = (struct fip_header *)skb->data;  	op = ntohs(fiph->fip_op); @@ -1856,7 +1898,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,  		 * address_mode flag to use FC_OUI-based Ethernet DA.  		 * Otherwise we use the FCoE gateway addr  		 */ -		if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) { +		if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {  			fcoe_ctlr_map_dest(fip);  		} else {  			memcpy(fip->dest_addr, sa, ETH_ALEN); @@ -2825,8 +2867,8 @@ unlock:   * disabled, so that should ensure that this routine is only called   * when nothing is happening.   
*/ -void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, -			enum fip_state fip_mode) +static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip, +			       enum fip_state fip_mode)  {  	void *priv; diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c index c9382d6eee7..045c4e11ee5 100644 --- a/drivers/scsi/fcoe/fcoe_sysfs.c +++ b/drivers/scsi/fcoe/fcoe_sysfs.c @@ -300,29 +300,29 @@ static ssize_t store_ctlr_mode(struct device *dev,  	switch (ctlr->enabled) {  	case FCOE_CTLR_ENABLED: -		LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled."); +		LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");  		return -EBUSY;  	case FCOE_CTLR_DISABLED:  		if (!ctlr->f->set_fcoe_ctlr_mode) {  			LIBFCOE_SYSFS_DBG(ctlr, -					  "Mode change not supported by LLD."); +					  "Mode change not supported by LLD.\n");  			return -ENOTSUPP;  		}  		ctlr->mode = fcoe_parse_mode(mode);  		if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) { -			LIBFCOE_SYSFS_DBG(ctlr, -					  "Unknown mode %s provided.", buf); +			LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n", +					  buf);  			return -EINVAL;  		}  		ctlr->f->set_fcoe_ctlr_mode(ctlr); -		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf); +		LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);  		return count;  	case FCOE_CTLR_UNUSED:  	default: -		LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported."); +		LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");  		return -ENOTSUPP;  	};  } @@ -553,16 +553,20 @@ static struct device_type fcoe_fcf_device_type = {  	.release = fcoe_fcf_device_release,  }; -static struct bus_attribute fcoe_bus_attr_group[] = { -	__ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store), -	__ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store), -	__ATTR_NULL +static BUS_ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store); +static BUS_ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store); + +static struct attribute *fcoe_bus_attrs[] = { +	&bus_attr_ctlr_create.attr, +	&bus_attr_ctlr_destroy.attr, +	NULL,  }; +ATTRIBUTE_GROUPS(fcoe_bus);  static struct bus_type fcoe_bus_type = {  	.name = "fcoe",  	.match = &fcoe_bus_match, -	.bus_attrs = fcoe_bus_attr_group, +	.bus_groups = fcoe_bus_groups,  };  /** @@ -653,7 +657,7 @@ static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,  	if (new->switch_name == old->switch_name &&  	    new->fabric_name == old->fabric_name &&  	    new->fc_map == old->fc_map && -	    compare_ether_addr(new->mac, old->mac) == 0) +	    ether_addr_equal(new->mac, old->mac))  		return 1;  	return 0;  } diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index e4dd3d7cd23..1d3521e13d7 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -27,6 +27,7 @@  #include "fnic_io.h"  #include "fnic_res.h"  #include "fnic_trace.h" +#include "fnic_stats.h"  #include "vnic_dev.h"  #include "vnic_wq.h"  #include "vnic_rq.h" @@ -38,14 +39,15 @@  #define DRV_NAME		"fnic"  #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver" -#define DRV_VERSION		"1.5.0.23" +#define DRV_VERSION		"1.6.0.10"  #define PFX			DRV_NAME ": "  #define DFX                     DRV_NAME "%d: "  #define DESC_CLEAN_LOW_WATERMARK 8  #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD	16 /* UCSM default throttle count */  #define FNIC_MIN_IO_REQ			256 /* Min IO throttle count */ -#define FNIC_MAX_IO_REQ		2048 /* scsi_cmnd tag map entries */ +#define FNIC_MAX_IO_REQ		1024 /* scsi_cmnd tag map entries */ +#define FNIC_DFLT_IO_REQ        256 /* Default 
scsi_cmnd tag map entries */
 #define	FNIC_IO_LOCKS		64 /* IO locks: power of 2 */
 #define FNIC_DFLT_QUEUE_DEPTH	32
 #define	FNIC_STATS_RATE_LIMIT	4 /* limit rate at which stats are pulled up */
@@ -232,6 +234,13 @@ struct fnic {
 	unsigned int wq_count;
 	unsigned int cq_count;
 
+	struct dentry *fnic_stats_debugfs_host;
+	struct dentry *fnic_stats_debugfs_file;
+	struct dentry *fnic_reset_debugfs_file;
+	unsigned int reset_stats;
+	atomic64_t io_cmpl_skip;
+	struct fnic_stats fnic_stats;
+
 	u32 vlan_hw_insert:1;	        /* let hw insert the tag */
 	u32 in_remove:1;                /* fnic device in removal */
 	u32 stop_rx_link_events:1;      /* stop proc. rx frames, link events */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index cbcb0121c84..2c613bdea78 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -23,14 +23,91 @@
 static struct dentry *fnic_trace_debugfs_root;
 static struct dentry *fnic_trace_debugfs_file;
 static struct dentry *fnic_trace_enable;
+static struct dentry *fnic_stats_debugfs_root;
+
+static struct dentry *fnic_fc_trace_debugfs_file;
+static struct dentry *fnic_fc_rdata_trace_debugfs_file;
+static struct dentry *fnic_fc_trace_enable;
+static struct dentry *fnic_fc_trace_clear;
+
+struct fc_trace_flag_type {
+	u8 fc_row_file;
+	u8 fc_normal_file;
+	u8 fnic_trace;
+	u8 fc_trace;
+	u8 fc_clear;
+};
+
+static struct fc_trace_flag_type *fc_trc_flag;
+
+/*
+ * fnic_debugfs_init - Initialize debugfs for fnic debug logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory and statistics directory for trace buffer and
+ * stats logging.
+ */
+int fnic_debugfs_init(void)
+{
+	int rc = -1;
+	fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+	if (!fnic_trace_debugfs_root) {
+		printk(KERN_DEBUG "Cannot create debugfs root\n");
+		return rc;
+	}
+
+	fnic_stats_debugfs_root = debugfs_create_dir("statistics",
+						fnic_trace_debugfs_root);
+	if (!fnic_stats_debugfs_root) {
+		printk(KERN_DEBUG "Cannot create Statistics directory\n");
+		return rc;
+	}
+
+	/* Allocate memory for the flag structure */
+	fc_trc_flag = (struct fc_trace_flag_type *)
+		vmalloc(sizeof(struct fc_trace_flag_type));
+
+	if (fc_trc_flag) {
+		fc_trc_flag->fc_row_file = 0;
+		fc_trc_flag->fc_normal_file = 1;
+		fc_trc_flag->fnic_trace = 2;
+		fc_trc_flag->fc_trace = 3;
+		fc_trc_flag->fc_clear = 4;
+	}
+
+	rc = 0;
+	return rc;
+}
+
+/*
+ * fnic_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic.
+ */
+void fnic_debugfs_terminate(void)
+{
+	debugfs_remove(fnic_stats_debugfs_root);
+	fnic_stats_debugfs_root = NULL;
+
+	debugfs_remove(fnic_trace_debugfs_root);
+	fnic_trace_debugfs_root = NULL;
+
+	if (fc_trc_flag)
+		vfree(fc_trc_flag);
+}
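/*
 * A minimal, self-contained sketch (not part of the patch) of the debugfs
 * layout pattern that fnic_debugfs_init()/fnic_debugfs_terminate() above
 * follow: create one driver root under /sys/kernel/debug, hang a
 * "statistics" sub-directory off it, and tear everything down in reverse
 * order. All "example" names here are hypothetical.
 */
#include <linux/module.h>
#include <linux/debugfs.h>

static struct dentry *example_root;
static struct dentry *example_stats_dir;

static int __init example_debugfs_init(void)
{
	/* One root directory per driver, directly under /sys/kernel/debug */
	example_root = debugfs_create_dir("example", NULL);
	if (!example_root)
		return -ENOMEM;

	/* Sub-directory for statistics, mirroring "fnic/statistics" above */
	example_stats_dir = debugfs_create_dir("statistics", example_root);
	if (!example_stats_dir) {
		debugfs_remove(example_root);
		example_root = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_debugfs_exit(void)
{
	/* debugfs_remove() is NULL-safe; drop children before the root */
	debugfs_remove(example_stats_dir);
	debugfs_remove(example_root);
}

module_init(example_debugfs_init);
module_exit(example_debugfs_exit);
MODULE_LICENSE("GPL");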
 
 /*
- * fnic_trace_ctrl_open - Open the trace_enable file
+ * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace,
+ *		or the fc_trace_enable file for fc_trace
  * @inode: The inode pointer.
  * @file: The file pointer to attach the trace enable/disable flag.
  *
  * Description:
- * This routine opens a debugsfs file trace_enable.
+ * This routine opens a debugfs file trace_enable or fc_trace_enable.
  *
  * Returns:
  * This function returns zero if successful.
@@ -42,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
 }
 
 /*
- * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * fnic_trace_ctrl_read -
+ *		Read the trace_enable, fc_trace_enable
+ *		or fc_trace_clear debugfs file
  * @filp: The file pointer to read from.
  * @ubuf: The buffer to copy the data to.
  * @cnt: The number of bytes to read.
  * @ppos: The position in the file to start reading from.
  *
  * Description:
- * This routine reads value of variable fnic_tracing_enabled
- * and stores into local @buf. It will start reading file at @ppos and
+ * This routine reads the value of variable fnic_tracing_enabled,
+ * fnic_fc_tracing_enabled or fnic_fc_trace_cleared
+ * and stores it into local @buf.
+ * It will start reading file at @ppos and
  * copy up to @cnt of data to @ubuf from @buf.
  *
  * Returns:
@@ -62,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
 {
 	char buf[64];
 	int len;
-	len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+	u8 *trace_type;
+	len = 0;
+	trace_type = (u8 *)filp->private_data;
+	if (*trace_type == fc_trc_flag->fnic_trace)
+		len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+	else if (*trace_type == fc_trc_flag->fc_trace)
+		len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+	else if (*trace_type == fc_trc_flag->fc_clear)
+		len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+	else
+		pr_err("fnic: Cannot read from any debugfs file\n");
 
 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 }
 
 /*
- * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * fnic_trace_ctrl_write -
+ * Write to trace_enable, fc_trace_enable or
+ *	fc_trace_clear debugfs file
  * @filp: The file pointer to write from.
  * @ubuf: The buffer to copy the data from.
  * @cnt: The number of bytes to write.
@@ -76,7 +175,8 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
  *
  * Description:
  * This routine writes data from user buffer @ubuf to buffer @buf and
- * sets fnic_tracing_enabled value as per user input.
+ * sets fnic_tracing_enabled, fnic_fc_tracing_enabled or
+ * fnic_fc_trace_cleared value as per user input.
  *
  * Returns:
  * This function returns the amount of data that was written.
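/*
 * A minimal sketch (not from the patch) of the control-file pattern the
 * functions above implement: a single file_operations serves several flag
 * files because each debugfs_create_file() call passes a distinct cookie,
 * which lands in inode->i_private and is copied into filp->private_data on
 * open. The demo_* names are illustrative only.
 */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>

static u32 demo_trace_on;	/* cookie for a "trace_enable"-like file */
static u32 demo_clear_flag;	/* cookie for a "trace_clear"-like file */

static int demo_ctrl_open(struct inode *inode, struct file *filp)
{
	/* Same job simple_open() does: remember which file was opened */
	filp->private_data = inode->i_private;
	return 0;
}

static ssize_t demo_ctrl_read(struct file *filp, char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	u32 *flag = filp->private_data;
	char buf[16];
	int len = scnprintf(buf, sizeof(buf), "%u\n", *flag);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t demo_ctrl_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	u32 *flag = filp->private_data;
	unsigned long val;
	char buf[16];

	if (cnt >= sizeof(buf))
		return -EINVAL;
	if (copy_from_user(buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';
	if (kstrtoul(buf, 10, &val))
		return -EINVAL;
	*flag = val;
	return cnt;
}

static const struct file_operations demo_ctrl_fops = {
	.owner = THIS_MODULE,
	.open  = demo_ctrl_open,
	.read  = demo_ctrl_read,
	.write = demo_ctrl_write,
};

/*
 * Two files, one fops -- only the i_private cookie differs:
 *
 *	debugfs_create_file("trace_on", 0644, root,
 *			    &demo_trace_on, &demo_ctrl_fops);
 *	debugfs_create_file("clear", 0644, root,
 *			    &demo_clear_flag, &demo_ctrl_fops);
 */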
@@ -88,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
 	char buf[64];
 	unsigned long val;
 	int ret;
+	u8 *trace_type;
+	trace_type = (u8 *)filp->private_data;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -101,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
 	if (ret < 0)
 		return ret;
 
-	fnic_tracing_enabled = val;
+	if (*trace_type == fc_trc_flag->fnic_trace)
+		fnic_tracing_enabled = val;
+	else if (*trace_type == fc_trc_flag->fc_trace)
+		fnic_fc_tracing_enabled = val;
+	else if (*trace_type == fc_trc_flag->fc_clear)
+		fnic_fc_trace_cleared = val;
+	else
+		pr_err("fnic: cannot write to any debugfs file\n");
+
 	(*ppos)++;
 
 	return cnt;
 }
 
+static const struct file_operations fnic_trace_ctrl_fops = {
+	.owner = THIS_MODULE,
+	.open = fnic_trace_ctrl_open,
+	.read = fnic_trace_ctrl_read,
+	.write = fnic_trace_ctrl_write,
+};
+
 /*
  * fnic_trace_debugfs_open - Open the fnic trace log
  * @inode: The inode pointer
@@ -126,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode,
 				  struct file *file)
 {
 	fnic_dbgfs_t *fnic_dbg_prt;
+	u8 *rdata_ptr;
+	rdata_ptr = (u8 *)inode->i_private;
 	fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
 	if (!fnic_dbg_prt)
 		return -ENOMEM;
 
-	fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
-	if (!fnic_dbg_prt->buffer) {
-		kfree(fnic_dbg_prt);
-		return -ENOMEM;
+	if (*rdata_ptr == fc_trc_flag->fnic_trace) {
+		fnic_dbg_prt->buffer = vmalloc(3 *
+					(trace_max_pages * PAGE_SIZE));
+		if (!fnic_dbg_prt->buffer) {
+			kfree(fnic_dbg_prt);
+			return -ENOMEM;
+		}
+		memset((void *)fnic_dbg_prt->buffer, 0,
+			3 * (trace_max_pages * PAGE_SIZE));
+		fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+	} else {
+		fnic_dbg_prt->buffer =
+			vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+		if (!fnic_dbg_prt->buffer) {
+			kfree(fnic_dbg_prt);
+			return -ENOMEM;
+		}
+		memset((void *)fnic_dbg_prt->buffer, 0,
+			3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+		fnic_dbg_prt->buffer_len =
+			fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
 	}
-	memset((void *)fnic_dbg_prt->buffer, 0,
-			  (3*(trace_max_pages * PAGE_SIZE)));
-	fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
 	file->private_data = fnic_dbg_prt;
+
 	return 0;
 }
 
@@ -220,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode,
 	return 0;
 }
 
-static const struct file_operations fnic_trace_ctrl_fops = {
-	.owner = THIS_MODULE,
-	.open = fnic_trace_ctrl_open,
-	.read = fnic_trace_ctrl_read,
-	.write = fnic_trace_ctrl_write,
-};
-
 static const struct file_operations fnic_trace_debugfs_fops = {
 	.owner = THIS_MODULE,
 	.open = fnic_trace_debugfs_open,
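/*
 * A short sketch, with hypothetical names, of the snapshot-on-open pattern
 * used by fnic_trace_debugfs_open() above: open allocates a large buffer
 * with vmalloc() and captures the trace once; read only replays that
 * snapshot through simple_read_from_buffer(); release frees it. A reader
 * therefore sees one consistent capture even while the live trace buffer
 * keeps moving.
 */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

struct snap {
	char *buf;
	size_t len;
};

/* Stand-in for fnic_get_trace_data(): fill the buffer, return the length */
static size_t capture_data(char *dst, size_t max)
{
	return scnprintf(dst, max, "example trace contents\n");
}

static int snap_open(struct inode *inode, struct file *filp)
{
	struct snap *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		return -ENOMEM;
	s->buf = vmalloc(PAGE_SIZE);		/* sized as needed */
	if (!s->buf) {
		kfree(s);
		return -ENOMEM;
	}
	s->len = capture_data(s->buf, PAGE_SIZE);
	filp->private_data = s;
	return 0;
}

static ssize_t snap_read(struct file *filp, char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	struct snap *s = filp->private_data;

	return simple_read_from_buffer(ubuf, cnt, ppos, s->buf, s->len);
}

static int snap_release(struct inode *inode, struct file *filp)
{
	struct snap *s = filp->private_data;

	vfree(s->buf);
	kfree(s);
	return 0;
}

static const struct file_operations snap_fops = {
	.owner   = THIS_MODULE,
	.open    = snap_open,
	.read    = snap_read,
	.release = snap_release,
};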
@@ -241,37 +368,39 @@ static const struct file_operations fnic_trace_debugfs_fops = {
  *
  * Description:
  * When Debugfs is configured this routine sets up the fnic debugfs
  * file system. If not already created, this routine will create the
- * fnic directory. It will create file trace to log fnic trace buffer
- * output into debugfs and it will also create file trace_enable to
- * control enable/disable of trace logging into trace buffer.
+ * file trace to log fnic trace buffer output into debugfs and
+ * it will also create file trace_enable to control enable/disable of
+ * trace logging into the trace buffer.
  */
 int fnic_trace_debugfs_init(void)
 {
 	int rc = -1;
-	fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
 	if (!fnic_trace_debugfs_root) {
-		printk(KERN_DEBUG "Cannot create debugfs root\n");
+		printk(KERN_DEBUG
+			"FNIC Debugfs root directory doesn't exist\n");
 		return rc;
 	}
 	fnic_trace_enable = debugfs_create_file("tracing_enable",
-					  S_IFREG|S_IRUGO|S_IWUSR,
-					  fnic_trace_debugfs_root,
-					  NULL, &fnic_trace_ctrl_fops);
+					S_IFREG|S_IRUGO|S_IWUSR,
+					fnic_trace_debugfs_root,
+					&(fc_trc_flag->fnic_trace),
+					&fnic_trace_ctrl_fops);
 	if (!fnic_trace_enable) {
-		printk(KERN_DEBUG "Cannot create trace_enable file"
-				  " under debugfs");
+		printk(KERN_DEBUG
			"Cannot create trace_enable file under debugfs\n");
 		return rc;
 	}
 	fnic_trace_debugfs_file = debugfs_create_file("trace",
-						  S_IFREG|S_IRUGO|S_IWUSR,
-						  fnic_trace_debugfs_root,
-						  NULL,
-						  &fnic_trace_debugfs_fops);
+					S_IFREG|S_IRUGO|S_IWUSR,
+					fnic_trace_debugfs_root,
+					&(fc_trc_flag->fnic_trace),
+					&fnic_trace_debugfs_fops);
 	if (!fnic_trace_debugfs_file) {
-		printk(KERN_DEBUG "Cannot create trace file under debugfs");
+		printk(KERN_DEBUG
+			"Cannot create trace file under debugfs\n");
 		return rc;
 	}
 	rc = 0;
@@ -287,16 +416,421 @@ int fnic_trace_debugfs_init(void)
  */
 void fnic_trace_debugfs_terminate(void)
 {
-	if (fnic_trace_debugfs_file) {
-		debugfs_remove(fnic_trace_debugfs_file);
-		fnic_trace_debugfs_file = NULL;
+	debugfs_remove(fnic_trace_debugfs_file);
+	fnic_trace_debugfs_file = NULL;
+
+	debugfs_remove(fnic_trace_enable);
+	fnic_trace_enable = NULL;
+}
+
+/*
+ * fnic_fc_trace_debugfs_init -
+ * Initialize debugfs for fnic control frame trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic_fc debugfs
+ * file system. If not already created, this routine will create the
+ * files fc_trace and fc_trace_rdata to log fnic fc trace buffer output
+ * into debugfs and it will also create file fc_trace_enable to control
+ * enable/disable of trace logging into the trace buffer.
+ */
+
+int fnic_fc_trace_debugfs_init(void)
+{
+	int rc = -1;
+
+	if (!fnic_trace_debugfs_root) {
+		pr_err("fnic: Debugfs root directory doesn't exist\n");
+		return rc;
+	}
+
+	fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
+					S_IFREG|S_IRUGO|S_IWUSR,
+					fnic_trace_debugfs_root,
+					&(fc_trc_flag->fc_trace),
+					&fnic_trace_ctrl_fops);
+
+	if (!fnic_fc_trace_enable) {
+		pr_err("fnic: Failed to create fc_trace_enable file\n");
+		return rc;
+	}
+
+	fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
+					S_IFREG|S_IRUGO|S_IWUSR,
+					fnic_trace_debugfs_root,
+					&(fc_trc_flag->fc_clear),
+					&fnic_trace_ctrl_fops);
+
+	if (!fnic_fc_trace_clear) {
+		pr_err("fnic: Failed to create fc_trace_clear file\n");
+		return rc;
+	}
+
+	fnic_fc_rdata_trace_debugfs_file =
+		debugfs_create_file("fc_trace_rdata",
+				    S_IFREG|S_IRUGO|S_IWUSR,
+				    fnic_trace_debugfs_root,
+				    &(fc_trc_flag->fc_normal_file),
+				    &fnic_trace_debugfs_fops);
+
+	if (!fnic_fc_rdata_trace_debugfs_file) {
+		pr_err("fnic: Failed to create fc_trace_rdata file\n");
+		return rc;
+	}
+
+	fnic_fc_trace_debugfs_file =
+		debugfs_create_file("fc_trace",
+				    S_IFREG|S_IRUGO|S_IWUSR,
+				    fnic_trace_debugfs_root,
+				    &(fc_trc_flag->fc_row_file),
+				    &fnic_trace_debugfs_fops);
+
+	if (!fnic_fc_trace_debugfs_file) {
+		pr_err("fnic: Failed to create fc_trace file\n");
+		return rc;
+	}
+	rc = 0;
+	return rc;
+}
+
+/*
+ * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic_fc trace logging.
+ */
+
+void fnic_fc_trace_debugfs_terminate(void)
+{
+	debugfs_remove(fnic_fc_trace_debugfs_file);
+	fnic_fc_trace_debugfs_file = NULL;
+
+	debugfs_remove(fnic_fc_rdata_trace_debugfs_file);
+	fnic_fc_rdata_trace_debugfs_file = NULL;
+
+	debugfs_remove(fnic_fc_trace_enable);
+	fnic_fc_trace_enable = NULL;
+
+	debugfs_remove(fnic_fc_trace_clear);
+	fnic_fc_trace_clear = NULL;
+}
+
+/*
+ * fnic_reset_stats_open - Open the reset_stats file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the stats reset flag.
+ *
+ * Description:
+ * This routine opens a debugfs file reset_stats and stores the i_private
+ * data in the debug structure, to be retrieved later while performing
+ * other file operations.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_reset_stats_open(struct inode *inode, struct file *file)
+{
+	struct stats_debug_info *debug;
+
+	debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->i_private = inode->i_private;
+
+	file->private_data = debug;
+
+	return 0;
+}
+
+/*
+ * fnic_reset_stats_read - Read a reset_stats debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of variable reset_stats
+ * and stores it into local @buf. It will start reading file at @ppos and
+ * copy up to @cnt of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_reset_stats_read(struct file *file,
+					char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	struct fnic *fnic = (struct fnic *)debug->i_private;
+	char buf[64];
+	int len;
+
+	len = sprintf(buf, "%u\n", fnic->reset_stats);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_reset_stats_write - Write to reset_stats debugfs file
+ * @file: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine writes data from user buffer @ubuf to buffer @buf and
+ * resets the cumulative stats of fnic.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_reset_stats_write(struct file *file,
+					const char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	struct fnic *fnic = (struct fnic *)debug->i_private;
+	struct fnic_stats *stats = &fnic->fnic_stats;
+	u64 *io_stats_p = (u64 *)&stats->io_stats;
+	u64 *fw_stats_p = (u64 *)&stats->fw_stats;
+	char buf[64];
+	unsigned long val;
+	int ret;
+
+	if (cnt >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+
+	buf[cnt] = 0;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	fnic->reset_stats = val;
+
+	if (fnic->reset_stats) {
+		/* Skip variable is used to avoid discrepancies between the
+		 * Num IOs and IO Completions stats. Skip incrementing Num
+		 * IO Compls for IOs still pending at the time of reset
+		 */
+		atomic64_set(&fnic->io_cmpl_skip,
+			atomic64_read(&stats->io_stats.active_ios));
+		memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
+		memset(&stats->term_stats, 0,
+			sizeof(struct terminate_stats));
+		memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
+		memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
+		memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
+		memset(io_stats_p+1, 0,
+			sizeof(struct io_path_stats) - sizeof(u64));
+		memset(fw_stats_p+1, 0,
+			sizeof(struct fw_stats) - sizeof(u64));
 	}
-	if (fnic_trace_enable) {
-		debugfs_remove(fnic_trace_enable);
-		fnic_trace_enable = NULL;
+
+	(*ppos)++;
+	return cnt;
+}
+
+/*
+ * fnic_reset_stats_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_reset_stats_release(struct inode *inode,
+					struct file *file)
+{
+	struct stats_debug_info *debug = file->private_data;
+	kfree(debug);
+	return 0;
+}
+
+/*
+ * fnic_stats_debugfs_open - Open the stats file for a specific host
+ * and get fnic stats.
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the specific host statistics.
+ *
+ * Description:
+ * This routine opens a debugfs file stats of a specific host and prints
+ * fnic stats.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_stats_debugfs_open(struct inode *inode,
+					struct file *file)
+{
+	struct fnic *fnic = inode->i_private;
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+	struct stats_debug_info *debug;
+	int buf_size = 2 * PAGE_SIZE;
+
+	debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
+	if (!debug)
+		return -ENOMEM;
+
+	debug->debug_buffer = vmalloc(buf_size);
+	if (!debug->debug_buffer) {
+		kfree(debug);
+		return -ENOMEM;
 	}
-
+
+	debug->buf_size = buf_size;
+	memset((void *)debug->debug_buffer, 0, buf_size);
+	debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
+
+	file->private_data = debug;
+
+	return 0;
+}
+
+/*
+ * fnic_stats_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_stats_debugfs_read(struct file *file,
+					char __user *ubuf,
+					size_t nbytes,
+					loff_t *pos)
+{
+	struct stats_debug_info *debug = file->private_data;
+	int rc = 0;
+	rc = simple_read_from_buffer(ubuf, nbytes, pos,
+					debug->debug_buffer,
+					debug->buffer_len);
+	return rc;
+}
+
+/*
+ * fnic_stats_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_stats_debugfs_release(struct inode *inode,
+					struct file *file)
+{
+	struct stats_debug_info *debug = file->private_data;
+	vfree(debug->debug_buffer);
+	kfree(debug);
+	return 0;
+}
+
+static const struct file_operations fnic_stats_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = fnic_stats_debugfs_open,
+	.read = fnic_stats_debugfs_read,
+	.release = fnic_stats_debugfs_release,
+};
+
+static const struct file_operations fnic_reset_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = fnic_reset_stats_open,
+	.read = fnic_reset_stats_read,
+	.write = fnic_reset_stats_write,
+	.release = fnic_reset_stats_release,
+};
+
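/*
 * A worked sketch of the io_cmpl_skip accounting that
 * fnic_reset_stats_write() above sets up (shortened names, same idea):
 * when the stats are reset while IOs are still in flight, the current
 * active count is parked in a skip counter, and each later completion
 * consumes a skip credit instead of bumping io_completions. That keeps
 * num_ios and io_completions consistent for IOs issued after the reset.
 * As in the driver, the read-then-dec pair is not atomic as a whole,
 * which is acceptable for statistics.
 */
#include <linux/atomic.h>

static atomic64_t active_ios;		/* IOs currently in flight */
static atomic64_t io_completions;	/* completions since last reset */
static atomic64_t io_cmpl_skip;		/* pre-reset completions to ignore */

static void reset_io_stats(void)
{
	/* IOs already in flight will complete later; don't count them */
	atomic64_set(&io_cmpl_skip, atomic64_read(&active_ios));
	atomic64_set(&io_completions, 0);
}

static void note_io_completion(void)
{
	atomic64_dec(&active_ios);
	if (atomic64_read(&io_cmpl_skip))
		atomic64_dec(&io_cmpl_skip);	/* swallow a pre-reset IO */
	else
		atomic64_inc(&io_completions);
}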
+/*
+ * fnic_stats_debugfs_init - Initialize debugfs and create stats file per fnic
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the stats file per fnic.
+ * It will create files stats and reset_stats under the statistics/host#
+ * directory to log per fnic stats.
+ */
+int fnic_stats_debugfs_init(struct fnic *fnic)
+{
+	int rc = -1;
+	char name[16];
+
+	snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
+
+	if (!fnic_stats_debugfs_root) {
+		printk(KERN_DEBUG "fnic_stats root doesn't exist\n");
+		return rc;
+	}
+	fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
+						fnic_stats_debugfs_root);
+	if (!fnic->fnic_stats_debugfs_host) {
+		printk(KERN_DEBUG "Cannot create host directory\n");
+		return rc;
+	}
+
+	fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
+						S_IFREG|S_IRUGO|S_IWUSR,
+						fnic->fnic_stats_debugfs_host,
+						fnic,
+						&fnic_stats_debugfs_fops);
+	if (!fnic->fnic_stats_debugfs_file) {
+		printk(KERN_DEBUG "Cannot create host stats file\n");
+		return rc;
+	}
+
+	fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
+						S_IFREG|S_IRUGO|S_IWUSR,
+						fnic->fnic_stats_debugfs_host,
+						fnic,
+						&fnic_reset_debugfs_fops);
+	if (!fnic->fnic_reset_debugfs_file) {
+		printk(KERN_DEBUG "Cannot create host reset_stats file\n");
+		return rc;
 	}
+	rc = 0;
+	return rc;
+}
+
+/*
+ * fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic stats.
+ */
+void fnic_stats_debugfs_remove(struct fnic *fnic)
+{
+	if (!fnic)
+		return;
+
+	debugfs_remove(fnic->fnic_stats_debugfs_file);
+	fnic->fnic_stats_debugfs_file = NULL;
+
+	debugfs_remove(fnic->fnic_reset_debugfs_file);
+	fnic->fnic_reset_debugfs_file = NULL;
+
+	debugfs_remove(fnic->fnic_stats_debugfs_host);
+	fnic->fnic_stats_debugfs_host = NULL;
 }
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 006fa92a02d..1b948f633fc 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -66,19 +66,35 @@ void fnic_handle_link(struct work_struct *work)
 	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
 
 	if (old_link_status == fnic->link_status) {
-		if (!fnic->link_status)
+		if (!fnic->link_status) {
 			/* DOWN -> DOWN */
 			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		else {
+			fnic_fc_trace_set_data(fnic->lport->host->host_no,
+				FNIC_FC_LE, "Link Status: DOWN->DOWN",
+				strlen("Link Status: DOWN->DOWN"));
+		} else {
 			if (old_link_down_cnt != fnic->link_down_cnt) {
 				/* UP -> DOWN -> UP */
 				fnic->lport->host_stats.link_failure_count++;
 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+				fnic_fc_trace_set_data(
+					fnic->lport->host->host_no,
+					FNIC_FC_LE,
+					"Link Status:UP_DOWN_UP",
+					strlen("Link Status:UP_DOWN_UP")
+					);
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 					     "link down\n");
 				fcoe_ctlr_link_down(&fnic->ctlr);
 				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
 					/* start FCoE VLAN discovery */
+					fnic_fc_trace_set_data(
+						fnic->lport->host->host_no,
+						FNIC_FC_LE,
+						"Link Status: UP_DOWN_UP_VLAN",
+						strlen(
+						"Link Status: UP_DOWN_UP_VLAN")
+						);
 					fnic_fcoe_send_vlan_req(fnic);
 					return;
 				}
@@ -88,22 +104,37 @@ void fnic_handle_link(struct work_struct *work)
-			} else
+			} else {
 				/* UP -> UP */
 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+				fnic_fc_trace_set_data(
+					fnic->lport->host->host_no, FNIC_FC_LE,
+					"Link Status: UP_UP",
+					strlen("Link Status: UP_UP"));
+			}
 		}
 	} else if (fnic->link_status) {
 		/* DOWN -> UP */
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		if (fnic->config.flags 
& VFCF_FIP_CAPABLE) {  			/* start FCoE VLAN discovery */ +				fnic_fc_trace_set_data( +				fnic->lport->host->host_no, +				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", +				strlen("Link Status: DOWN_UP_VLAN"));  			fnic_fcoe_send_vlan_req(fnic);  			return;  		}  		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); +		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, +			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));  		fcoe_ctlr_link_up(&fnic->ctlr);  	} else {  		/* UP -> DOWN */  		fnic->lport->host_stats.link_failure_count++;  		spin_unlock_irqrestore(&fnic->fnic_lock, flags);  		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); +		fnic_fc_trace_set_data( +			fnic->lport->host->host_no, FNIC_FC_LE, +			"Link Status: UP_DOWN", +			strlen("Link Status: UP_DOWN"));  		fcoe_ctlr_link_down(&fnic->ctlr);  	} @@ -267,11 +297,6 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,  	if (desc->fip_dtype == FIP_DT_FLOGI) { -		shost_printk(KERN_DEBUG, lport->host, -			  " FIP TYPE FLOGI: fab name:%llx " -			  "vfid:%d map:%x\n", -			  fip->sel_fcf->fabric_name, fip->sel_fcf->vfid, -			  fip->sel_fcf->fc_map);  		if (dlen < sizeof(*els) + sizeof(*fh) + 1)  			return 0; @@ -302,6 +327,7 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,  static void fnic_fcoe_send_vlan_req(struct fnic *fnic)  {  	struct fcoe_ctlr *fip = &fnic->ctlr; +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	struct sk_buff *skb;  	char *eth_fr;  	int fr_len; @@ -337,6 +363,7 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)  	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;  	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;  	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); +	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);  	skb_put(skb, sizeof(*vlan));  	skb->protocol = htons(ETH_P_FIP); @@ -354,6 +381,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)  	struct fcoe_ctlr *fip = &fnic->ctlr;  	struct fip_header *fiph;  	struct fip_desc *desc; +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	u16 vid;  	size_t rlen;  	size_t dlen; @@ -402,6 +430,7 @@ static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)  	/* any VLAN descriptors present ? 
*/  	if (list_empty(&fnic->vlans)) {  		/* retry from timer */ +		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);  		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,  			  "No VLAN descriptors in FIP VLAN response\n");  		spin_unlock_irqrestore(&fnic->vlans_lock, flags); @@ -533,6 +562,7 @@ drop:  void fnic_handle_fip_frame(struct work_struct *work)  {  	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	unsigned long flags;  	struct sk_buff *skb;  	struct ethhdr *eh; @@ -567,6 +597,8 @@ void fnic_handle_fip_frame(struct work_struct *work)  			 * fcf's & restart from scratch  			 */  			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { +				atomic64_inc( +					&fnic_stats->vlan_stats.flogi_rejects);  				shost_printk(KERN_INFO, fnic->lport->host,  					  "Trigger a Link down - VLAN Disc\n");  				fcoe_ctlr_link_down(&fnic->ctlr); @@ -609,6 +641,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)  					"using UCSM\n");  			goto drop;  		} +		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, +			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { +			printk(KERN_ERR "fnic ctlr frame trace error!!!"); +		}  		skb_queue_tail(&fnic->fip_frame_queue, skb);  		queue_work(fnic_fip_queue, &fnic->fip_frame_work);  		return 1;		/* let caller know packet was used */ @@ -651,13 +687,13 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new)  	if (is_zero_ether_addr(new))  		new = ctl; -	if (!compare_ether_addr(data, new)) +	if (ether_addr_equal(data, new))  		return;  	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new); -	if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl)) +	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))  		vnic_dev_del_addr(fnic->vdev, data);  	memcpy(data, new, ETH_ALEN); -	if (compare_ether_addr(new, ctl)) +	if (!ether_addr_equal(new, ctl))  		vnic_dev_add_addr(fnic->vdev, new);  } @@ -753,6 +789,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc  	struct fnic *fnic = vnic_dev_priv(rq->vdev);  	struct sk_buff *skb;  	struct fc_frame *fp; +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	unsigned int eth_hdrs_stripped;  	u8 type, color, eop, sop, ingress_port, vlan_stripped;  	u8 fcoe = 0, fcoe_sof, fcoe_eof; @@ -803,6 +840,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc  		eth_hdrs_stripped = 0;  		skb_trim(skb, bytes_written);  		if (!fcs_ok) { +			atomic64_inc(&fnic_stats->misc_stats.frame_errors);  			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,  				     "fcs error.  
dropping packet.\n");  			goto drop; @@ -818,6 +856,7 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc  	}  	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { +		atomic64_inc(&fnic_stats->misc_stats.frame_errors);  		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,  			     "fnic rq_cmpl fcoe x%x fcsok x%x"  			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" @@ -834,6 +873,10 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc  	}  	fr_dev(fp) = fnic->lport;  	spin_unlock_irqrestore(&fnic->fnic_lock, flags); +	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, +					(char *)skb->data, skb->len)) != 0) { +		printk(KERN_ERR "fnic ctlr frame trace error!!!"); +	}  	skb_queue_tail(&fnic->frame_queue, skb);  	queue_work(fnic_event_queue, &fnic->frame_work); @@ -941,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)  		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);  		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;  		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); +		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, +			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { +			printk(KERN_ERR "fnic ctlr frame trace error!!!"); +		} +	} else { +		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, +			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { +			printk(KERN_ERR "fnic ctlr frame trace error!!!"); +		}  	}  	pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); @@ -1013,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)  	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE); +	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, +				(char *)eth_hdr, tot_len)) != 0) { +		printk(KERN_ERR "fnic ctlr frame trace error!!!"); +	} +  	spin_lock_irqsave(&fnic->wq_lock[0], flags);  	if (!vnic_wq_desc_avail(wq)) { @@ -1205,6 +1262,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)  {  	unsigned long flags;  	struct fcoe_vlan *vlan; +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	u64 sol_time;  	spin_lock_irqsave(&fnic->fnic_lock, flags); @@ -1273,6 +1331,7 @@ void fnic_handle_fip_timer(struct fnic *fnic)  			vlan->state = FIP_VLAN_SENT; /* sent now */  		}  		spin_unlock_irqrestore(&fnic->vlans_lock, flags); +		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);  		vlan->sol_count++;  		sol_time = jiffies + msecs_to_jiffies  					(FCOE_CTLR_START_DELAY); diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index 5c1f223cabc..7d9b54ae7f6 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -37,6 +37,9 @@ static irqreturn_t fnic_isr_legacy(int irq, void *data)  	if (!pba)  		return IRQ_NONE; +	fnic->fnic_stats.misc_stats.last_isr_time = jiffies; +	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); +  	if (pba & (1 << FNIC_INTX_NOTIFY)) {  		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);  		fnic_handle_link_event(fnic); @@ -66,6 +69,9 @@ static irqreturn_t fnic_isr_msi(int irq, void *data)  	struct fnic *fnic = data;  	unsigned long work_done = 0; +	fnic->fnic_stats.misc_stats.last_isr_time = jiffies; +	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); +  	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);  	work_done += fnic_wq_cmpl_handler(fnic, -1);  	work_done += fnic_rq_cmpl_handler(fnic, -1); @@ -83,6 +89,9 @@ static irqreturn_t fnic_isr_msix_rq(int irq, void *data)  	struct fnic *fnic = data;  	unsigned long 
rq_work_done = 0; +	fnic->fnic_stats.misc_stats.last_isr_time = jiffies; +	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); +  	rq_work_done = fnic_rq_cmpl_handler(fnic, -1);  	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],  				 rq_work_done, @@ -97,6 +106,9 @@ static irqreturn_t fnic_isr_msix_wq(int irq, void *data)  	struct fnic *fnic = data;  	unsigned long wq_work_done = 0; +	fnic->fnic_stats.misc_stats.last_isr_time = jiffies; +	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); +  	wq_work_done = fnic_wq_cmpl_handler(fnic, -1);  	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],  				 wq_work_done, @@ -110,6 +122,9 @@ static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)  	struct fnic *fnic = data;  	unsigned long wq_copy_work_done = 0; +	fnic->fnic_stats.misc_stats.last_isr_time = jiffies; +	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); +  	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);  	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],  				 wq_copy_work_done, @@ -122,6 +137,9 @@ static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)  {  	struct fnic *fnic = data; +	fnic->fnic_stats.misc_stats.last_isr_time = jiffies; +	atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count); +  	vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);  	fnic_log_q_error(fnic);  	fnic_handle_link_event(fnic); diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index bbf81ea3a25..8c56fdc3a45 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -74,6 +74,11 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "  					"for fnic trace buffer"); +unsigned int fnic_fc_trace_max_pages = 64; +module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(fnic_fc_trace_max_pages, +		 "Total allocated memory pages for fc trace buffer"); +  static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;  module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); @@ -111,7 +116,7 @@ static struct scsi_host_template fnic_host_template = {  	.change_queue_type = fc_change_queue_type,  	.this_id = -1,  	.cmd_per_lun = 3, -	.can_queue = FNIC_MAX_IO_REQ, +	.can_queue = FNIC_DFLT_IO_REQ,  	.use_clustering = ENABLE_CLUSTERING,  	.sg_tablesize = FNIC_MAX_SG_DESC_CNT,  	.max_sectors = 0xffff, @@ -556,6 +561,13 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  	host->transportt = fnic_fc_transport; +	err = fnic_stats_debugfs_init(fnic); +	if (err) { +		shost_printk(KERN_ERR, fnic->lport->host, +				"Failed to initialize debugfs for stats\n"); +		fnic_stats_debugfs_remove(fnic); +	} +  	/* Setup PCI resources */  	pci_set_drvdata(pdev, fnic); @@ -766,6 +778,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)  		shost_printk(KERN_INFO, fnic->lport->host,  			     "firmware uses non-FIP mode\n");  		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); +		fnic->ctlr.state = FIP_ST_NON_FIP;  	}  	fnic->state = FNIC_IN_FC_MODE; @@ -917,6 +930,7 @@ err_out_release_regions:  err_out_disable_device:  	pci_disable_device(pdev);  err_out_free_hba: +	fnic_stats_debugfs_remove(fnic);  	scsi_host_put(lp->host);  err_out:  	return err; @@ -969,6 +983,7 @@ static void fnic_remove(struct pci_dev *pdev)  	fcoe_ctlr_destroy(&fnic->ctlr);  	fc_lport_destroy(lp); +	fnic_stats_debugfs_remove(fnic);  	/*  	 * This 
stops the fnic device, masks all interrupts. Completed @@ -996,7 +1011,6 @@ static void fnic_remove(struct pci_dev *pdev)  	fnic_iounmap(fnic);  	pci_release_regions(pdev);  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  	scsi_host_put(lp->host);  } @@ -1014,14 +1028,31 @@ static int __init fnic_init_module(void)  	printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION); +	/* Create debugfs entries for fnic */ +	err = fnic_debugfs_init(); +	if (err < 0) { +		printk(KERN_ERR PFX "Failed to create fnic directory " +				"for tracing and stats logging\n"); +		fnic_debugfs_terminate(); +	} +  	/* Allocate memory for trace buffer */  	err = fnic_trace_buf_init();  	if (err < 0) { -		printk(KERN_ERR PFX "Trace buffer initialization Failed " -				  "Fnic Tracing utility is disabled\n"); +		printk(KERN_ERR PFX +		       "Trace buffer initialization Failed. " +		       "Fnic Tracing utility is disabled\n");  		fnic_trace_free();  	} +    /* Allocate memory for fc trace buffer */ +	err = fnic_fc_trace_init(); +	if (err < 0) { +		printk(KERN_ERR PFX "FC trace buffer initialization Failed " +		       "FC frame tracing utility is disabled\n"); +		fnic_fc_trace_free(); +	} +  	/* Create a cache for allocation of default size sgls */  	len = sizeof(struct fnic_dflt_sgl_list);  	fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create @@ -1102,6 +1133,8 @@ err_create_fnic_sgl_slab_max:  	kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);  err_create_fnic_sgl_slab_dflt:  	fnic_trace_free(); +	fnic_fc_trace_free(); +	fnic_debugfs_terminate();  	return err;  } @@ -1118,6 +1151,8 @@ static void __exit fnic_cleanup_module(void)  	kmem_cache_destroy(fnic_io_req_cache);  	fc_release_transport(fnic_fc_transport);  	fnic_trace_free(); +	fnic_fc_trace_free(); +	fnic_debugfs_terminate();  }  module_init(fnic_init_module); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index d014aae1913..ea28b5ca4c7 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -226,15 +226,23 @@ int fnic_fw_reset_handler(struct fnic *fnic)  	if (!vnic_wq_copy_desc_avail(wq))  		ret = -EAGAIN; -	else +	else {  		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); +		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); +		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > +			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) +			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, +				atomic64_read( +				  &fnic->fnic_stats.fw_stats.active_fw_reqs)); +	}  	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); -	if (!ret) +	if (!ret) { +		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);  		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			      "Issued fw reset\n"); -	else { +	} else {  		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);  		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			      "Failed to issue fw reset\n"); @@ -291,6 +299,12 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)  			      fc_id, fnic->ctlr.map_dest, gw_mac);  	} +	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); +	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > +		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) +		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, +		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); +  flogi_reg_ioreq_end:  	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);  	return ret; @@ -310,6 +324,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,  	struct 
fc_rport *rport = starget_to_rport(scsi_target(sc->device));  	struct fc_rport_libfc_priv *rp = rport->dd_data;  	struct host_sg_desc *desc; +	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;  	u8 pri_tag = 0;  	unsigned int i;  	unsigned long intr_flags; @@ -358,6 +373,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,  		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);  		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,  			  "fnic_queue_wq_copy_desc failure - no descriptors\n"); +		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);  		return SCSI_MLQUEUE_HOST_BUSY;  	} @@ -386,6 +402,12 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,  					 rport->maxframe_size, rp->r_a_tov,  					 rp->e_d_tov); +	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); +	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > +		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) +		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, +		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); +  	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);  	return 0;  } @@ -401,6 +423,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_  	struct fc_rport *rport;  	struct fnic_io_req *io_req = NULL;  	struct fnic *fnic = lport_priv(lp); +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	struct vnic_wq_copy *wq;  	int ret;  	u64 cmd_trace; @@ -414,6 +437,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_  	rport = starget_to_rport(scsi_target(sc->device));  	ret = fc_remote_port_chkready(rport);  	if (ret) { +		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);  		sc->result = ret;  		done(sc);  		return 0; @@ -436,6 +460,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_  	/* Get a new io_req for this SCSI IO */  	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);  	if (!io_req) { +		atomic64_inc(&fnic_stats->io_stats.alloc_failures);  		ret = SCSI_MLQUEUE_HOST_BUSY;  		goto out;  	} @@ -462,6 +487,7 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_  			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],  				      GFP_ATOMIC);  		if (!io_req->sgl_list) { +			atomic64_inc(&fnic_stats->io_stats.alloc_failures);  			ret = SCSI_MLQUEUE_HOST_BUSY;  			scsi_dma_unmap(sc);  			mempool_free(io_req, fnic->io_req_pool); @@ -509,6 +535,13 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_  			mempool_free(io_req, fnic->io_req_pool);  		}  	} else { +		atomic64_inc(&fnic_stats->io_stats.active_ios); +		atomic64_inc(&fnic_stats->io_stats.num_ios); +		if (atomic64_read(&fnic_stats->io_stats.active_ios) > +			  atomic64_read(&fnic_stats->io_stats.max_active_ios)) +			atomic64_set(&fnic_stats->io_stats.max_active_ios, +			     atomic64_read(&fnic_stats->io_stats.active_ios)); +  		/* REVISIT: Use per IO lock in the final code */  		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;  	} @@ -542,12 +575,18 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,  	struct fcpio_tag tag;  	int ret = 0;  	unsigned long flags; +	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;  	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); +	atomic64_inc(&reset_stats->fw_reset_completions); +  	/* Clean up all outstanding io requests */  	fnic_cleanup_io(fnic, SCSI_NO_TAG); +	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); +	
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); +  	spin_lock_irqsave(&fnic->fnic_lock, flags);  	/* fnic should be in FC_TRANS_ETH_MODE */ @@ -571,6 +610,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,  			 * reset the firmware. Free the cached flogi  			 */  			fnic->state = FNIC_IN_FC_MODE; +			atomic64_inc(&reset_stats->fw_reset_failures);  			ret = -1;  		}  	} else { @@ -578,6 +618,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,  			      fnic->lport->host,  			      "Unexpected state %s while processing"  			      " reset cmpl\n", fnic_state_to_str(fnic->state)); +		atomic64_inc(&reset_stats->fw_reset_failures);  		ret = -1;  	} @@ -701,10 +742,14 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,  	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];  	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); +	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;  	if (is_ack_index_in_range(wq, request_out)) {  		fnic->fw_ack_index[0] = request_out;  		fnic->fw_ack_recd[0] = 1; -	} +	} else +		atomic64_inc( +			&fnic->fnic_stats.misc_stats.ack_index_out_of_range); +  	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);  	FNIC_TRACE(fnic_fcpio_ack_handler,  		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], @@ -726,6 +771,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,  	struct fcpio_icmnd_cmpl *icmnd_cmpl;  	struct fnic_io_req *io_req;  	struct scsi_cmnd *sc; +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	unsigned long flags;  	spinlock_t *io_lock;  	u64 cmd_trace; @@ -746,6 +792,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,  	sc = scsi_host_find_tag(fnic->lport->host, id);  	WARN_ON_ONCE(!sc);  	if (!sc) { +		atomic64_inc(&fnic_stats->io_stats.sc_null);  		shost_printk(KERN_ERR, fnic->lport->host,  			  "icmnd_cmpl sc is null - "  			  "hdr status = %s tag = 0x%x desc = 0x%p\n", @@ -766,6 +813,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,  	io_req = (struct fnic_io_req *)CMD_SP(sc);  	WARN_ON_ONCE(!io_req);  	if (!io_req) { +		atomic64_inc(&fnic_stats->io_stats.ioreq_null);  		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;  		spin_unlock_irqrestore(io_lock, flags);  		shost_printk(KERN_ERR, fnic->lport->host, @@ -824,31 +872,54 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,  		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)  			xfer_len -= icmnd_cmpl->residual; +		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) +			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);  		break;  	case FCPIO_TIMEOUT:          /* request was timed out */ +		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);  		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;  		break;  	case FCPIO_ABORTED:          /* request was aborted */ +		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);  		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;  		break;  	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. 
*/ +		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);  		scsi_set_resid(sc, icmnd_cmpl->residual);  		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;  		break;  	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */ +		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);  		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;  		break; -	case FCPIO_INVALID_HEADER:   /* header contains invalid data */ -	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */ -	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */ +  	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */ +		atomic64_inc(&fnic_stats->io_stats.io_not_found); +		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; +		break; +  	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */ -	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */ +		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid); +		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; +		break; +  	case FCPIO_FW_ERR:           /* request was terminated due fw error */ +		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs); +		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; +		break; + +	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */ +		atomic64_inc(&fnic_stats->misc_stats.mss_invalid); +		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status; +		break; + +	case FCPIO_INVALID_HEADER:   /* header contains invalid data */ +	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */ +	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */  	default:  		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",  			     fnic_fcpio_status_to_str(hdr_status)); @@ -856,6 +927,11 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,  		break;  	} +	if (hdr_status != FCPIO_SUCCESS) { +		atomic64_inc(&fnic_stats->io_stats.io_failures); +		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", +			     fnic_fcpio_status_to_str(hdr_status)); +	}  	/* Break link with the SCSI command */  	CMD_SP(sc) = NULL;  	CMD_FLAGS(sc) |= FNIC_IO_DONE; @@ -889,6 +965,12 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,  	} else  		fnic->lport->host_stats.fcp_control_requests++; +	atomic64_dec(&fnic_stats->io_stats.active_ios); +	if (atomic64_read(&fnic->io_cmpl_skip)) +		atomic64_dec(&fnic->io_cmpl_skip); +	else +		atomic64_inc(&fnic_stats->io_stats.io_completions); +  	/* Call SCSI completion function to complete the IO */  	if (sc->scsi_done)  		sc->scsi_done(sc); @@ -906,6 +988,10 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,  	u32 id;  	struct scsi_cmnd *sc;  	struct fnic_io_req *io_req; +	struct fnic_stats *fnic_stats = &fnic->fnic_stats; +	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats; +	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; +	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;  	unsigned long flags;  	spinlock_t *io_lock;  	unsigned long start_time; @@ -923,6 +1009,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,  	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);  	WARN_ON_ONCE(!sc);  	if (!sc) { +		atomic64_inc(&fnic_stats->io_stats.sc_null);  		shost_printk(KERN_ERR, fnic->lport->host,  			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",  			  fnic_fcpio_status_to_str(hdr_status), id); @@ -933,6 +1020,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic 
*fnic,  	io_req = (struct fnic_io_req *)CMD_SP(sc);  	WARN_ON_ONCE(!io_req);  	if (!io_req) { +		atomic64_inc(&fnic_stats->io_stats.ioreq_null);  		spin_unlock_irqrestore(io_lock, flags);  		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;  		shost_printk(KERN_ERR, fnic->lport->host, @@ -957,6 +1045,31 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,  		spin_unlock_irqrestore(io_lock, flags);  	} else if (id & FNIC_TAG_ABORT) {  		/* Completion of abort cmd */ +		switch (hdr_status) { +		case FCPIO_SUCCESS: +			break; +		case FCPIO_TIMEOUT: +			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) +				atomic64_inc(&abts_stats->abort_fw_timeouts); +			else +				atomic64_inc( +					&term_stats->terminate_fw_timeouts); +			break; +		case FCPIO_IO_NOT_FOUND: +			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) +				atomic64_inc(&abts_stats->abort_io_not_found); +			else +				atomic64_inc( +					&term_stats->terminate_io_not_found); +			break; +		default: +			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED) +				atomic64_inc(&abts_stats->abort_failures); +			else +				atomic64_inc( +					&term_stats->terminate_failures); +			break; +		}  		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {  			/* This is a late completion. Ignore it */  			spin_unlock_irqrestore(io_lock, flags); @@ -964,6 +1077,16 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,  		}  		CMD_ABTS_STATUS(sc) = hdr_status;  		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; + +		atomic64_dec(&fnic_stats->io_stats.active_ios); +		if (atomic64_read(&fnic->io_cmpl_skip)) +			atomic64_dec(&fnic->io_cmpl_skip); +		else +			atomic64_inc(&fnic_stats->io_stats.io_completions); + +		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) +			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); +  		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			      "abts cmpl recd. 
id %d status %s\n",  			      (int)(id & FNIC_TAG_MASK), @@ -1067,6 +1190,18 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,  	struct fnic *fnic = vnic_dev_priv(vdev);  	switch (desc->hdr.type) { +	case FCPIO_ICMND_CMPL: /* fw completed a command */ +	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/ +	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */ +	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */ +	case FCPIO_RESET_CMPL: /* fw completed reset */ +		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs); +		break; +	default: +		break; +	} + +	switch (desc->hdr.type) {  	case FCPIO_ACK: /* fw copied copy wq desc to its queue */  		fnic_fcpio_ack_handler(fnic, cq_index, desc);  		break; @@ -1126,6 +1261,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)  	struct scsi_cmnd *sc;  	spinlock_t *io_lock;  	unsigned long start_time = 0; +	struct fnic_stats *fnic_stats = &fnic->fnic_stats;  	for (i = 0; i < fnic->fnic_max_tag_id; i++) {  		if (i == exclude_id) @@ -1176,8 +1312,14 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)  cleanup_scsi_cmd:  		sc->result = DID_TRANSPORT_DISRUPTED << 16; -		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:" -			      " DID_TRANSPORT_DISRUPTED\n"); +		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, +			      "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n", +			      __func__, (jiffies - start_time)); + +		if (atomic64_read(&fnic->io_cmpl_skip)) +			atomic64_dec(&fnic->io_cmpl_skip); +		else +			atomic64_inc(&fnic_stats->io_stats.io_completions);  		/* Complete the command to SCSI */  		if (sc->scsi_done) { @@ -1262,6 +1404,7 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,  {  	struct vnic_wq_copy *wq = &fnic->wq_copy[0];  	struct Scsi_Host *host = fnic->lport->host; +	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;  	unsigned long flags;  	spin_lock_irqsave(host->host_lock, flags); @@ -1283,12 +1426,19 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,  		atomic_dec(&fnic->in_flight);  		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			"fnic_queue_abort_io_req: failure: no descriptors\n"); +		atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);  		return 1;  	}  	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,  				     0, task_req, tag, fc_lun, io_req->port_id,  				     fnic->config.ra_tov, fnic->config.ed_tov); +	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); +	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > +		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) +		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, +		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); +  	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);  	atomic_dec(&fnic->in_flight); @@ -1299,10 +1449,13 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)  {  	int tag;  	int abt_tag; +	int term_cnt = 0;  	struct fnic_io_req *io_req;  	spinlock_t *io_lock;  	unsigned long flags;  	struct scsi_cmnd *sc; +	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; +	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;  	struct scsi_lun fc_lun;  	enum fnic_ioreq_state old_ioreq_state; @@ -1366,6 +1519,7 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)  		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;  		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;  		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { +			
atomic64_inc(&reset_stats->device_reset_terminates);  			abt_tag = (tag | FNIC_TAG_DEV_RST);  			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			"fnic_rport_exch_reset dev rst sc 0x%p\n", @@ -1402,8 +1556,12 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)  			else  				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;  			spin_unlock_irqrestore(io_lock, flags); +			atomic64_inc(&term_stats->terminates); +			term_cnt++;  		}  	} +	if (term_cnt > atomic64_read(&term_stats->max_terminates)) +		atomic64_set(&term_stats->max_terminates, term_cnt);  } @@ -1411,6 +1569,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)  {  	int tag;  	int abt_tag; +	int term_cnt = 0;  	struct fnic_io_req *io_req;  	spinlock_t *io_lock;  	unsigned long flags; @@ -1420,6 +1579,8 @@ void fnic_terminate_rport_io(struct fc_rport *rport)  	struct fc_lport *lport;  	struct fnic *fnic;  	struct fc_rport *cmd_rport; +	struct reset_stats *reset_stats; +	struct terminate_stats *term_stats;  	enum fnic_ioreq_state old_ioreq_state;  	if (!rport) { @@ -1448,6 +1609,9 @@ void fnic_terminate_rport_io(struct fc_rport *rport)  	if (fnic->in_remove)  		return; +	reset_stats = &fnic->fnic_stats.reset_stats; +	term_stats = &fnic->fnic_stats.term_stats; +  	for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) {  		abt_tag = tag;  		io_lock = fnic_io_lock_tag(fnic, tag); @@ -1504,6 +1668,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)  		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;  		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;  		if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) { +			atomic64_inc(&reset_stats->device_reset_terminates);  			abt_tag = (tag | FNIC_TAG_DEV_RST);  			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			"fnic_terminate_rport_io dev rst sc 0x%p\n", sc); @@ -1540,8 +1705,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport)  			else  				CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;  			spin_unlock_irqrestore(io_lock, flags); +			atomic64_inc(&term_stats->terminates); +			term_cnt++;  		}  	} +	if (term_cnt > atomic64_read(&term_stats->max_terminates)) +		atomic64_set(&term_stats->max_terminates, term_cnt);  } @@ -1562,6 +1731,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  	int ret = SUCCESS;  	u32 task_req = 0;  	struct scsi_lun fc_lun; +	struct fnic_stats *fnic_stats; +	struct abort_stats *abts_stats; +	struct terminate_stats *term_stats; +	enum fnic_ioreq_state old_ioreq_state;  	int tag;  	DECLARE_COMPLETION_ONSTACK(tm_done); @@ -1572,6 +1745,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  	lp = shost_priv(sc->device->host);  	fnic = lport_priv(lp); +	fnic_stats = &fnic->fnic_stats; +	abts_stats = &fnic->fnic_stats.abts_stats; +	term_stats = &fnic->fnic_stats.term_stats; +  	rport = starget_to_rport(scsi_target(sc->device));  	tag = sc->request->tag;  	FNIC_SCSI_DBG(KERN_DEBUG, @@ -1618,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  	 * the completion won't be done till mid-layer, since abort  	 * has already started.
*/ +	old_ioreq_state = CMD_STATE(sc);  	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;  	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE; @@ -1630,8 +1808,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  	 */  	if (fc_remote_port_chkready(rport) == 0)  		task_req = FCPIO_ITMF_ABT_TASK; -	else +	else { +		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);  		task_req = FCPIO_ITMF_ABT_TASK_TERM; +	}  	/* Now queue the abort command to firmware */  	int_to_scsilun(sc->device->lun, &fc_lun); @@ -1639,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,  				    fc_lun.scsi_lun, io_req)) {  		spin_lock_irqsave(io_lock, flags); +		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) +			CMD_STATE(sc) = old_ioreq_state;  		io_req = (struct fnic_io_req *)CMD_SP(sc);  		if (io_req)  			io_req->abts_done = NULL; @@ -1646,10 +1828,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  		ret = FAILED;  		goto fnic_abort_cmd_end;  	} -	if (task_req == FCPIO_ITMF_ABT_TASK) +	if (task_req == FCPIO_ITMF_ABT_TASK) {  		CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED; -	else +		atomic64_inc(&fnic_stats->abts_stats.aborts); +	} else {  		CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED; +		atomic64_inc(&fnic_stats->term_stats.terminates); +	}  	/*  	 * We queued an abort IO, wait for its completion. @@ -1667,6 +1852,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  	io_req = (struct fnic_io_req *)CMD_SP(sc);  	if (!io_req) { +		atomic64_inc(&fnic_stats->io_stats.ioreq_null);  		spin_unlock_irqrestore(io_lock, flags);  		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;  		ret = FAILED; @@ -1677,6 +1863,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)  	/* fw did not complete abort, timed out */  	if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {  		spin_unlock_irqrestore(io_lock, flags); +		if (task_req == FCPIO_ITMF_ABT_TASK) { +			atomic64_inc(&abts_stats->abort_drv_timeouts); +		} else { +			atomic64_inc(&term_stats->terminate_drv_timeouts); +		}  		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;  		ret = FAILED;  		goto fnic_abort_cmd_end; @@ -1721,6 +1912,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,  {  	struct vnic_wq_copy *wq = &fnic->wq_copy[0];  	struct Scsi_Host *host = fnic->lport->host; +	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;  	struct scsi_lun fc_lun;  	int ret = 0;  	unsigned long intr_flags; @@ -1742,6 +1934,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,  	if (!vnic_wq_copy_desc_avail(wq)) {  		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			  "queue_dr_io_req failure - no descriptors\n"); +		atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);  		ret = -EAGAIN;  		goto lr_io_req_end;  	} @@ -1754,6 +1947,12 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,  				     fc_lun.scsi_lun, io_req->port_id,  				     fnic->config.ra_tov, fnic->config.ed_tov); +	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); +	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > +		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs)) +		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs, +		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs)); +  lr_io_req_end:  	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);  	atomic_dec(&fnic->in_flight); @@ -1988,6 +2187,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)  	unsigned long flags;  	unsigned long start_time = 0;  	struct scsi_lun fc_lun; +	struct fnic_stats *fnic_stats; +	struct reset_stats *reset_stats;  	int tag = 0;  	
DECLARE_COMPLETION_ONSTACK(tm_done);  	int tag_gen_flag = 0;   /*to track tags allocated by fnic driver*/ @@ -1999,6 +2200,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)  	lp = shost_priv(sc->device->host);  	fnic = lport_priv(lp); +	fnic_stats = &fnic->fnic_stats; +	reset_stats = &fnic->fnic_stats.reset_stats; + +	atomic64_inc(&reset_stats->device_resets);  	rport = starget_to_rport(scsi_target(sc->device));  	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, @@ -2009,8 +2214,10 @@ int fnic_device_reset(struct scsi_cmnd *sc)  		goto fnic_device_reset_end;  	/* Check if remote port up */ -	if (fc_remote_port_chkready(rport)) +	if (fc_remote_port_chkready(rport)) { +		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);  		goto fnic_device_reset_end; +	}  	CMD_FLAGS(sc) = FNIC_DEVICE_RESET;  	/* Allocate tag if not present */ @@ -2086,6 +2293,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)  	 * gets cleaned up during higher levels of EH  	 */  	if (status == FCPIO_INVALID_CODE) { +		atomic64_inc(&reset_stats->device_reset_timeouts);  		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  			      "Device reset timed out\n");  		CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT; @@ -2199,6 +2407,10 @@ fnic_device_reset_end:  		      "Returning from device reset %s\n",  		      (ret == SUCCESS) ?  		      "SUCCESS" : "FAILED"); + +	if (ret == FAILED) +		atomic64_inc(&reset_stats->device_reset_failures); +  	return ret;  } @@ -2207,26 +2419,34 @@ int fnic_reset(struct Scsi_Host *shost)  {  	struct fc_lport *lp;  	struct fnic *fnic; -	int ret = SUCCESS; +	int ret = 0; +	struct reset_stats *reset_stats;  	lp = shost_priv(shost);  	fnic = lport_priv(lp); +	reset_stats = &fnic->fnic_stats.reset_stats;  	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  		      "fnic_reset called\n"); +	atomic64_inc(&reset_stats->fnic_resets); +  	/*  	 * Reset local port, this will clean up libFC exchanges,  	 * reset remote port sessions, and if link is up, begin flogi  	 */ -	if (lp->tt.lport_reset(lp)) -		ret = FAILED; +	ret = lp->tt.lport_reset(lp);  	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,  		      "Returning from fnic reset %s\n", -		      (ret == SUCCESS) ? +		      (ret == 0) ?  		      "SUCCESS" : "FAILED"); +	if (ret == 0) +		atomic64_inc(&reset_stats->fnic_reset_completions); +	else +		atomic64_inc(&reset_stats->fnic_reset_failures); +  	return ret;  } @@ -2251,7 +2471,7 @@ int fnic_host_reset(struct scsi_cmnd *sc)  	 * scsi-ml tries to send a TUR to every device if host reset is  	 * successful, so before returning to scsi, fabric should be up  	 */ -	ret = fnic_reset(shost); +	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;  	if (ret == SUCCESS) {  		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;  		ret = FAILED; diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h new file mode 100644 index 00000000000..540cceb843c --- /dev/null +++ b/drivers/scsi/fnic/fnic_stats.h @@ -0,0 +1,116 @@ +/* + * Copyright 2013 Cisco Systems, Inc.  All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef _FNIC_STATS_H_ +#define _FNIC_STATS_H_ +struct io_path_stats { +	atomic64_t active_ios; +	atomic64_t max_active_ios; +	atomic64_t io_completions; +	atomic64_t io_failures; +	atomic64_t ioreq_null; +	atomic64_t alloc_failures; +	atomic64_t sc_null; +	atomic64_t io_not_found; +	atomic64_t num_ios; +}; + +struct abort_stats { +	atomic64_t aborts; +	atomic64_t abort_failures; +	atomic64_t abort_drv_timeouts; +	atomic64_t abort_fw_timeouts; +	atomic64_t abort_io_not_found; +}; + +struct terminate_stats { +	atomic64_t terminates; +	atomic64_t max_terminates; +	atomic64_t terminate_drv_timeouts; +	atomic64_t terminate_fw_timeouts; +	atomic64_t terminate_io_not_found; +	atomic64_t terminate_failures; +}; + +struct reset_stats { +	atomic64_t device_resets; +	atomic64_t device_reset_failures; +	atomic64_t device_reset_aborts; +	atomic64_t device_reset_timeouts; +	atomic64_t device_reset_terminates; +	atomic64_t fw_resets; +	atomic64_t fw_reset_completions; +	atomic64_t fw_reset_failures; +	atomic64_t fnic_resets; +	atomic64_t fnic_reset_completions; +	atomic64_t fnic_reset_failures; +}; + +struct fw_stats { +	atomic64_t active_fw_reqs; +	atomic64_t max_fw_reqs; +	atomic64_t fw_out_of_resources; +	atomic64_t io_fw_errs; +}; + +struct vlan_stats { +	atomic64_t vlan_disc_reqs; +	atomic64_t resp_withno_vlanID; +	atomic64_t sol_expiry_count; +	atomic64_t flogi_rejects; +}; + +struct misc_stats { +	u64 last_isr_time; +	u64 last_ack_time; +	atomic64_t isr_count; +	atomic64_t max_cq_entries; +	atomic64_t ack_index_out_of_range; +	atomic64_t data_count_mismatch; +	atomic64_t fcpio_timeout; +	atomic64_t fcpio_aborted; +	atomic64_t sgl_invalid; +	atomic64_t mss_invalid; +	atomic64_t abts_cpwq_alloc_failures; +	atomic64_t devrst_cpwq_alloc_failures; +	atomic64_t io_cpwq_alloc_failures; +	atomic64_t no_icmnd_itmf_cmpls; +	atomic64_t queue_fulls; +	atomic64_t rport_not_ready; +	atomic64_t frame_errors; +}; + +struct fnic_stats { +	struct io_path_stats io_stats; +	struct abort_stats abts_stats; +	struct terminate_stats term_stats; +	struct reset_stats reset_stats; +	struct fw_stats fw_stats; +	struct vlan_stats vlan_stats; +	struct misc_stats misc_stats; +}; + +struct stats_debug_info { +	char *debug_buffer; +	void *i_private; +	int buf_size; +	int buffer_len; +}; + +int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *); +int fnic_stats_debugfs_init(struct fnic *); +void fnic_stats_debugfs_remove(struct fnic *); +#endif /* _FNIC_STATS_H_ */ diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 23a60e3d852..c7728592682 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -20,6 +20,7 @@  #include <linux/errno.h>  #include <linux/spinlock.h>  #include <linux/kallsyms.h> +#include <linux/time.h>  #include "fnic_io.h"  #include "fnic.h" @@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock);  static fnic_trace_dbg_t fnic_trace_entries;  int fnic_tracing_enabled = 1; +/* static char *fnic_fc_ctlr_trace_buf_p; */ + +static int fc_trace_max_entries; +static unsigned long fnic_fc_ctlr_trace_buf_p; +static fnic_trace_dbg_t fc_trace_entries; +int fnic_fc_tracing_enabled = 1; +int fnic_fc_trace_cleared = 1; +static DEFINE_SPINLOCK(fnic_fc_trace_lock); + +  /*   
* fnic_trace_get_buf - Give buffer pointer to user to fill up trace information   * @@ -189,6 +200,191 @@ int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)  }  /* + * fnic_get_stats_data - Copy fnic stats buffer to a memory file + * @fnic_dbgfs_t: pointer to debugfs fnic stats buffer + * + * Description: + * This routine gathers the fnic stats debugfs data from the fnic_stats struct + * and dumps it to stats_debug_info. + * + * Return Value: + * This routine returns the amount of bytes that were dumped into + * stats_debug_info + */ +int fnic_get_stats_data(struct stats_debug_info *debug, +			struct fnic_stats *stats) +{ +	int len = 0; +	int buf_size = debug->buf_size; +	struct timespec val1, val2; + +	len = snprintf(debug->debug_buffer + len, buf_size - len, +		  "------------------------------------------\n" +		  "\t\tIO Statistics\n" +		  "------------------------------------------\n"); +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "Number of Active IOs: %lld\nMaximum Active IOs: %lld\n" +		  "Number of IOs: %lld\nNumber of IO Completions: %lld\n" +		  "Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n" +		  "Number of Memory alloc Failures: %lld\n" +		  "Number of IOREQ Null: %lld\n" +		  "Number of SCSI cmd pointer Null: %lld\n", +		  (u64)atomic64_read(&stats->io_stats.active_ios), +		  (u64)atomic64_read(&stats->io_stats.max_active_ios), +		  (u64)atomic64_read(&stats->io_stats.num_ios), +		  (u64)atomic64_read(&stats->io_stats.io_completions), +		  (u64)atomic64_read(&stats->io_stats.io_failures), +		  (u64)atomic64_read(&stats->io_stats.io_not_found), +		  (u64)atomic64_read(&stats->io_stats.alloc_failures), +		  (u64)atomic64_read(&stats->io_stats.ioreq_null), +		  (u64)atomic64_read(&stats->io_stats.sc_null)); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "\n------------------------------------------\n" +		  "\t\tAbort Statistics\n" +		  "------------------------------------------\n"); +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "Number of Aborts: %lld\n" +		  "Number of Abort Failures: %lld\n" +		  "Number of Abort Driver Timeouts: %lld\n" +		  "Number of Abort FW Timeouts: %lld\n" +		  "Number of Abort IO NOT Found: %lld\n", +		  (u64)atomic64_read(&stats->abts_stats.aborts), +		  (u64)atomic64_read(&stats->abts_stats.abort_failures), +		  (u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts), +		  (u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts), +		  (u64)atomic64_read(&stats->abts_stats.abort_io_not_found)); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "\n------------------------------------------\n" +		  "\t\tTerminate Statistics\n" +		  "------------------------------------------\n"); +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "Number of Terminates: %lld\n" +		  "Maximum Terminates: %lld\n" +		  "Number of Terminate Driver Timeouts: %lld\n" +		  "Number of Terminate FW Timeouts: %lld\n" +		  "Number of Terminate IO NOT Found: %lld\n" +		  "Number of Terminate Failures: %lld\n", +		  (u64)atomic64_read(&stats->term_stats.terminates), +		  (u64)atomic64_read(&stats->term_stats.max_terminates), +		  (u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts), +		  (u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts), +		  (u64)atomic64_read(&stats->term_stats.terminate_io_not_found), +		  (u64)atomic64_read(&stats->term_stats.terminate_failures)); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  
"\n------------------------------------------\n" +		  "\t\tReset Statistics\n" +		  "------------------------------------------\n"); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "Number of Device Resets: %lld\n" +		  "Number of Device Reset Failures: %lld\n" +		  "Number of Device Reset Aborts: %lld\n" +		  "Number of Device Reset Timeouts: %lld\n" +		  "Number of Device Reset Terminates: %lld\n" +		  "Number of FW Resets: %lld\n" +		  "Number of FW Reset Completions: %lld\n" +		  "Number of FW Reset Failures: %lld\n" +		  "Number of Fnic Reset: %lld\n" +		  "Number of Fnic Reset Completions: %lld\n" +		  "Number of Fnic Reset Failures: %lld\n", +		  (u64)atomic64_read(&stats->reset_stats.device_resets), +		  (u64)atomic64_read(&stats->reset_stats.device_reset_failures), +		  (u64)atomic64_read(&stats->reset_stats.device_reset_aborts), +		  (u64)atomic64_read(&stats->reset_stats.device_reset_timeouts), +		  (u64)atomic64_read( +			  &stats->reset_stats.device_reset_terminates), +		  (u64)atomic64_read(&stats->reset_stats.fw_resets), +		  (u64)atomic64_read(&stats->reset_stats.fw_reset_completions), +		  (u64)atomic64_read(&stats->reset_stats.fw_reset_failures), +		  (u64)atomic64_read(&stats->reset_stats.fnic_resets), +		  (u64)atomic64_read( +			  &stats->reset_stats.fnic_reset_completions), +		  (u64)atomic64_read(&stats->reset_stats.fnic_reset_failures)); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "\n------------------------------------------\n" +		  "\t\tFirmware Statistics\n" +		  "------------------------------------------\n"); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "Number of Active FW Requests %lld\n" +		  "Maximum FW Requests: %lld\n" +		  "Number of FW out of resources: %lld\n" +		  "Number of FW IO errors: %lld\n", +		  (u64)atomic64_read(&stats->fw_stats.active_fw_reqs), +		  (u64)atomic64_read(&stats->fw_stats.max_fw_reqs), +		  (u64)atomic64_read(&stats->fw_stats.fw_out_of_resources), +		  (u64)atomic64_read(&stats->fw_stats.io_fw_errs)); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "\n------------------------------------------\n" +		  "\t\tVlan Discovery Statistics\n" +		  "------------------------------------------\n"); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "Number of Vlan Discovery Requests Sent %lld\n" +		  "Vlan Response Received with no FCF VLAN ID: %lld\n" +		  "No solicitations recvd after vlan set, expiry count: %lld\n" +		  "Flogi rejects count: %lld\n", +		  (u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs), +		  (u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID), +		  (u64)atomic64_read(&stats->vlan_stats.sol_expiry_count), +		  (u64)atomic64_read(&stats->vlan_stats.flogi_rejects)); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "\n------------------------------------------\n" +		  "\t\tOther Important Statistics\n" +		  "------------------------------------------\n"); + +	jiffies_to_timespec(stats->misc_stats.last_isr_time, &val1); +	jiffies_to_timespec(stats->misc_stats.last_ack_time, &val2); + +	len += snprintf(debug->debug_buffer + len, buf_size - len, +		  "Last ISR time: %llu (%8lu.%8lu)\n" +		  "Last ACK time: %llu (%8lu.%8lu)\n" +		  "Number of ISRs: %lld\n" +		  "Maximum CQ Entries: %lld\n" +		  "Number of ACK index out of range: %lld\n" +		  "Number of data count mismatch: %lld\n" +		  "Number of FCPIO Timeouts: %lld\n" +		  "Number of FCPIO Aborted: %lld\n" +		  "Number of SGL Invalid: %lld\n" 
+		  "Number of Copy WQ Alloc Failures for ABTs: %lld\n" +		  "Number of Copy WQ Alloc Failures for Device Reset: %lld\n" +		  "Number of Copy WQ Alloc Failures for IOs: %lld\n" +		  "Number of no icmnd itmf Completions: %lld\n" +		  "Number of QUEUE Fulls: %lld\n" +		  "Number of rport not ready: %lld\n" +		  "Number of receive frame errors: %lld\n", +		  (u64)stats->misc_stats.last_isr_time, +		  val1.tv_sec, val1.tv_nsec, +		  (u64)stats->misc_stats.last_ack_time, +		  val2.tv_sec, val2.tv_nsec, +		  (u64)atomic64_read(&stats->misc_stats.isr_count), +		  (u64)atomic64_read(&stats->misc_stats.max_cq_entries), +		  (u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range), +		  (u64)atomic64_read(&stats->misc_stats.data_count_mismatch), +		  (u64)atomic64_read(&stats->misc_stats.fcpio_timeout), +		  (u64)atomic64_read(&stats->misc_stats.fcpio_aborted), +		  (u64)atomic64_read(&stats->misc_stats.sgl_invalid), +		  (u64)atomic64_read( +			  &stats->misc_stats.abts_cpwq_alloc_failures), +		  (u64)atomic64_read( +			  &stats->misc_stats.devrst_cpwq_alloc_failures), +		  (u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures), +		  (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls), +		  (u64)atomic64_read(&stats->misc_stats.queue_fulls), +		  (u64)atomic64_read(&stats->misc_stats.rport_not_ready), +		  (u64)atomic64_read(&stats->misc_stats.frame_errors)); + +	return len; + +} + +/*   * fnic_trace_buf_init - Initialize fnic trace buffer logging facility   *   * Description: @@ -243,10 +439,10 @@ int fnic_trace_buf_init(void)  	}  	err = fnic_trace_debugfs_init();  	if (err < 0) { -		printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n"); +		pr_err("fnic: Failed to initialize debugfs for tracing\n");  		goto err_fnic_trace_debugfs_init;  	} -	printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n"); +	pr_info("fnic: Successfully Initialized Trace Buffer\n");  	return err;  err_fnic_trace_debugfs_init:  	fnic_trace_free(); @@ -271,3 +467,314 @@ void fnic_trace_free(void)  	}  	printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");  } + +/* + * fnic_fc_trace_init - + * Initialize trace buffer to log fnic control frames + * Description: + * Initialize trace buffer data structure by allocating + * required memory for trace data as well as for indexes. + * Frame size is 256 bytes and + * memory is allocated for 1024 entries of 256 bytes. + * page_offset (index) is set to the address of each trace entry + * and page_offset is initialized by adding frame size + * to the previous page_offset entry. + */
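The layout just described is easy to sanity-check outside the kernel. A minimal, self-contained userspace sketch of the same scheme follows (illustrative only: calloc stands in for vmalloc, and TRACE_PAGES, PAGE_SZ and ENTRY_SIZE are hypothetical stand-ins for fnic_fc_trace_max_pages, PAGE_SIZE and FC_TRC_SIZE_BYTES) - one flat allocation carved into fixed-size entries, plus a side table holding each entry's start address:

#include <stdio.h>
#include <stdlib.h>

#define TRACE_PAGES	16	/* stand-in for fnic_fc_trace_max_pages */
#define PAGE_SZ		4096	/* stand-in for PAGE_SIZE */
#define ENTRY_SIZE	256	/* mirrors FC_TRC_SIZE_BYTES */

int main(void)
{
	size_t max_entries = (TRACE_PAGES * PAGE_SZ) / ENTRY_SIZE;
	unsigned char *buf = calloc(TRACE_PAGES, PAGE_SZ);
	unsigned char **page_offset = calloc(max_entries, sizeof(*page_offset));
	size_t i;

	if (!buf || !page_offset)
		return 1;

	/* entry i starts exactly ENTRY_SIZE bytes after entry i - 1 */
	for (i = 0; i < max_entries; i++)
		page_offset[i] = buf + i * ENTRY_SIZE;

	printf("%zu entries; entry 3 starts at byte %td\n",
	       max_entries, page_offset[3] - buf);
	free(page_offset);
	free(buf);
	return 0;
}

Precomputing the per-entry addresses keeps the hot logging path down to a single table lookup while the trace spinlock is held.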
+ +int fnic_fc_trace_init(void) +{ +	unsigned long fc_trace_buf_head; +	int err = 0; +	int i; + +	fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/ +				FC_TRC_SIZE_BYTES; +	fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc( +					fnic_fc_trace_max_pages * PAGE_SIZE); +	if (!fnic_fc_ctlr_trace_buf_p) { +		pr_err("fnic: Failed to allocate memory for " +		       "FC Control Trace Buf\n"); +		err = -ENOMEM; +		goto err_fnic_fc_ctlr_trace_buf_init; +	} + +	memset((void *)fnic_fc_ctlr_trace_buf_p, 0, +			fnic_fc_trace_max_pages * PAGE_SIZE); + +	/* Allocate memory for page offset */ +	fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries * +						sizeof(unsigned long)); +	if (!fc_trace_entries.page_offset) { +		pr_err("fnic: Failed to allocate memory for page_offset\n"); +		if (fnic_fc_ctlr_trace_buf_p) { +			pr_err("fnic: Freeing FC Control Trace Buf\n"); +			vfree((void *)fnic_fc_ctlr_trace_buf_p); +			fnic_fc_ctlr_trace_buf_p = 0; +		} +		err = -ENOMEM; +		goto err_fnic_fc_ctlr_trace_buf_init; +	} +	memset((void *)fc_trace_entries.page_offset, 0, +	       (fc_trace_max_entries * sizeof(unsigned long))); + +	fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; +	fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p; + +	/* +	* Set up fc_trace_entries.page_offset field with memory location +	* for every trace entry +	*/ +	for (i = 0; i < fc_trace_max_entries; i++) { +		fc_trace_entries.page_offset[i] = fc_trace_buf_head; +		fc_trace_buf_head += FC_TRC_SIZE_BYTES; +	} +	err = fnic_fc_trace_debugfs_init(); +	if (err < 0) { +		pr_err("fnic: Failed to initialize FC_CTLR tracing.\n"); +		goto err_fnic_fc_ctlr_trace_debugfs_init; +	} +	pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n"); +	return err; + +err_fnic_fc_ctlr_trace_debugfs_init: +	fnic_fc_trace_free(); +err_fnic_fc_ctlr_trace_buf_init: +	return err; +} + +/* + * fnic_fc_trace_free - Free memory of fnic_fc_ctlr trace data structures. + */ +void fnic_fc_trace_free(void) +{ +	fnic_fc_tracing_enabled = 0; +	fnic_fc_trace_debugfs_terminate(); +	if (fc_trace_entries.page_offset) { +		vfree((void *)fc_trace_entries.page_offset); +		fc_trace_entries.page_offset = NULL; +	} +	if (fnic_fc_ctlr_trace_buf_p) { +		vfree((void *)fnic_fc_ctlr_trace_buf_p); +		fnic_fc_ctlr_trace_buf_p = 0; +	} +	pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n"); +} + +/* + * fnic_fc_trace_set_data: + *       Maintain rd & wr idx accordingly and set data + * Passed parameters: + *       host_no: host number associated with fnic + *       frame_type: send_frame, recv_frame or link event + *       fc_frame: pointer to fc_frame + *       frame_len: Length of the fc_frame + * Description: + *   This routine will get next available wr_idx and + *   copy all passed trace data to the buffer pointed by wr_idx + *   and increment wr_idx. It will also make sure that we don't + *   overwrite the entry which we are reading and also + *   wrap around if we reach the maximum entries. + * Returned Value: + *   It will return 0 for success or -1 for failure + */
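The wr_idx/rd_idx handling is the subtle part of the routine that follows, so here is the index arithmetic in isolation: a sketch assuming a hypothetical MAX_ENTRIES of 8, not driver code. When the writer laps the reader, the read index is pushed forward so the oldest unread entry is dropped instead of being half-read while overwritten:

#include <stdio.h>

#define MAX_ENTRIES 8	/* hypothetical stand-in for fc_trace_max_entries */

static int rd_idx, wr_idx;

/* claim the current slot, advance wr_idx with wraparound, and bump
 * rd_idx ahead if the writer has just caught up with the reader */
static int next_write_slot(void)
{
	int slot = wr_idx;

	if (++wr_idx >= MAX_ENTRIES)
		wr_idx = 0;
	if (wr_idx == rd_idx) {
		if (++rd_idx >= MAX_ENTRIES)
			rd_idx = 0;
	}
	return slot;
}

int main(void)
{
	int i;

	for (i = 0; i < 10; i++) {
		int slot = next_write_slot();

		printf("write %d -> slot %d (rd_idx now %d)\n",
		       i, slot, rd_idx);
	}
	return 0;
}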
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, +				char *frame, u32 fc_trc_frame_len) +{ +	unsigned long flags; +	struct fc_trace_hdr *fc_buf; +	unsigned long eth_fcoe_hdr_len; +	char *fc_trace; + +	if (fnic_fc_tracing_enabled == 0) +		return 0; + +	spin_lock_irqsave(&fnic_fc_trace_lock, flags); + +	if (fnic_fc_trace_cleared == 1) { +		fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0; +		pr_info("fnic: Resetting the read idx\n"); +		memset((void *)fnic_fc_ctlr_trace_buf_p, 0, +				fnic_fc_trace_max_pages * PAGE_SIZE); +		fnic_fc_trace_cleared = 0; +	} + +	fc_buf = (struct fc_trace_hdr *) +		fc_trace_entries.page_offset[fc_trace_entries.wr_idx]; + +	fc_trace_entries.wr_idx++; + +	if (fc_trace_entries.wr_idx >= fc_trace_max_entries) +		fc_trace_entries.wr_idx = 0; + +	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { +		fc_trace_entries.rd_idx++; +		if (fc_trace_entries.rd_idx >= fc_trace_max_entries) +			fc_trace_entries.rd_idx = 0; +	} + +	fc_buf->time_stamp = CURRENT_TIME; +	fc_buf->host_no = host_no; +	fc_buf->frame_type = frame_type; + +	fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf); + +	/* During the receive path, we do not have eth hdr as well as fcoe hdr +	 * at trace entry point so we will stuff 0xff just to make it generic. +	 */ +	if (frame_type == FNIC_FC_RECV) { +		eth_fcoe_hdr_len = sizeof(struct ethhdr) + +					sizeof(struct fcoe_hdr); +		fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len; +		memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); +		/* Copy the rest of data frame */ +		memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, +		min_t(u8, fc_trc_frame_len, +			(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); +	} else { +		memcpy((char *)fc_trace, (void *)frame, +		min_t(u8, fc_trc_frame_len, +			(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE))); +	} + +	/* Store the actual received length */ +	fc_buf->frame_len = fc_trc_frame_len; + +	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); +	return 0; +} + +/* + * fnic_fc_trace_get_data: Copy trace buffer to a memory file + * Passed parameter: + *       @fnic_dbgfs_t: pointer to debugfs trace buffer + *       rdata_flag: 1 => unformatted file + *                   0 => formatted file + * Description: + *       This routine will copy the trace data to memory file with + *       proper formatting and also copy to another memory + *       file without formatting for further processing. + * Return Value: + *       Number of bytes that were dumped into fnic_dbgfs_t + */
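Of the two output modes documented above, the unformatted one (rdata_flag == 1) reduces to a plain hex dump of each entry. A standalone sketch of that byte-to-hex loop (the sample entry bytes are made up; in the driver the loop covers FC_TRC_SIZE_BYTES of a real trace entry):

#include <stdio.h>

/* emit every byte of one trace entry as two hex digits on a single
 * line, which keeps the output trivially machine-parsable */
static void dump_entry_raw(const unsigned char *entry, int len)
{
	int j;

	for (j = 0; j < len; j++)
		printf("%02x", entry[j] & 0xff);
	printf("\n");
}

int main(void)
{
	unsigned char entry[8] = {
		0x52, 0x00, 0xff, 0xff, 0x0e, 0x01, 0x02, 0x03
	};

	dump_entry_raw(entry, sizeof(entry));
	return 0;
}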
+ +int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag) +{ +	int rd_idx, wr_idx; +	unsigned long flags; +	int len = 0, j; +	struct fc_trace_hdr *tdata; +	char *fc_trace; + +	spin_lock_irqsave(&fnic_fc_trace_lock, flags); +	if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) { +		spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); +		pr_info("fnic: Buffer is empty\n"); +		return 0; +	} +	rd_idx = fc_trace_entries.rd_idx; +	wr_idx = fc_trace_entries.wr_idx; +	if (rdata_flag == 0) { +		len += snprintf(fnic_dbgfs_prt->buffer + len, +			(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, +			"Time Stamp (UTC)\t\t" +			"Host No:   F Type:  len:     FCoE_FRAME:\n"); +	} + +	while (rd_idx != wr_idx) { +		tdata = (struct fc_trace_hdr *) +			fc_trace_entries.page_offset[rd_idx]; +		if (!tdata) { +			pr_info("fnic: Rd data is NULL\n"); +			spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); +			return 0; +		} +		if (rdata_flag == 0) { +			copy_and_format_trace_data(tdata, +				fnic_dbgfs_prt, &len, rdata_flag); +		} else { +			fc_trace = (char *)tdata; +			for (j = 0; j < FC_TRC_SIZE_BYTES; j++) { +				len += snprintf(fnic_dbgfs_prt->buffer + len, +				(fnic_fc_trace_max_pages * PAGE_SIZE * 3) +				- len, "%02x", fc_trace[j] & 0xff); +			} /* for loop */ +			len += snprintf(fnic_dbgfs_prt->buffer + len, +				(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, +				"\n"); +		} +		rd_idx++; +		if (rd_idx > (fc_trace_max_entries - 1)) +			rd_idx = 0; +	} + +	spin_unlock_irqrestore(&fnic_fc_trace_lock, flags); +	return len; +} + +/* + * copy_and_format_trace_data: Copy formatted data to char * buffer + * Passed Parameter: + *      @fc_trace_hdr_t: pointer to trace data + *      @fnic_dbgfs_t: pointer to debugfs trace buffer + *      @orig_len: pointer to len + *      rdata_flag: 0 => formatted file, 1 => unformatted file + * Description: + *      This routine will format and copy the passed trace data + *      for a formatted file or an unformatted file accordingly. + */
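The formatted path below converts the entry's stored timespec with time_to_tm() before printing. The same presentation can be previewed in userspace with gmtime_r() standing in for time_to_tm() (a sketch, not the driver code):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	struct tm tm;

	clock_gettime(CLOCK_REALTIME, &ts);
	gmtime_r(&ts.tv_sec, &tm);	/* userspace analogue of time_to_tm() */

	/* month:day:year hour:min:sec.nanoseconds, as in the trace output */
	printf("%02d:%02d:%04d %02d:%02d:%02d.%09lu ns\n",
	       tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
	       tm.tm_hour, tm.tm_min, tm.tm_sec,
	       (unsigned long)ts.tv_nsec);
	return 0;
}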
+ +void copy_and_format_trace_data(struct fc_trace_hdr *tdata, +				fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len, +				u8 rdata_flag) +{ +	struct tm tm; +	int j, i = 1, len; +	char *fc_trace, *fmt; +	int ethhdr_len = sizeof(struct ethhdr) - 1; +	int fcoehdr_len = sizeof(struct fcoe_hdr); +	int fchdr_len = sizeof(struct fc_frame_header); +	int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3; + +	tdata->frame_type = tdata->frame_type & 0x7F; + +	len = *orig_len; + +	time_to_tm(tdata->time_stamp.tv_sec, 0, &tm); + +	fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x       %c%8x\t"; +	len += snprintf(fnic_dbgfs_prt->buffer + len, +		(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, +		fmt, +		tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900, +		tm.tm_hour, tm.tm_min, tm.tm_sec, +		tdata->time_stamp.tv_nsec, tdata->host_no, +		tdata->frame_type, tdata->frame_len); + +	fc_trace = (char *)FC_TRACE_ADDRESS(tdata); + +	for (j = 0; j < min_t(u8, tdata->frame_len, +		(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) { +		if (tdata->frame_type == FNIC_FC_LE) { +			len += snprintf(fnic_dbgfs_prt->buffer + len, +				max_size - len, "%c", fc_trace[j]); +		} else { +			len += snprintf(fnic_dbgfs_prt->buffer + len, +				max_size - len, "%02x", fc_trace[j] & 0xff); +			len += snprintf(fnic_dbgfs_prt->buffer + len, +				max_size - len, " "); +			if (j == ethhdr_len || +				j == ethhdr_len + fcoehdr_len || +				j == ethhdr_len + fcoehdr_len + fchdr_len || +				(i > 3 && j%fchdr_len == 0)) { +				len += snprintf(fnic_dbgfs_prt->buffer +					+ len, (fnic_fc_trace_max_pages +					* PAGE_SIZE * 3) - len, +					"\n\t\t\t\t\t\t\t\t"); +				i++; +			} +		} /* end of else*/ +	} /* End of for loop*/ +	len += snprintf(fnic_dbgfs_prt->buffer + len, +		max_size - len, "\n"); +	*orig_len = len; +} diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h index cef42b4c4d6..a8aa0578fcb 100644 --- a/drivers/scsi/fnic/fnic_trace.h +++ b/drivers/scsi/fnic/fnic_trace.h @@ -19,6 +19,17 @@  #define __FNIC_TRACE_H__  #define FNIC_ENTRY_SIZE_BYTES 64 +#define FC_TRC_SIZE_BYTES 256 +#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr) + +/* + * First bit of FNIC_FC_RECV and FNIC_FC_SEND is used to represent the type + * of frame: 1 => Eth frame, 0 => FC frame + */ + +#define FNIC_FC_RECV 0x52 /* Character R */ +#define FNIC_FC_SEND 0x54 /* Character T */ +#define FNIC_FC_LE 0x4C /* Character L */  extern ssize_t simple_read_from_buffer(void __user *to,  					  size_t count, @@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages;  extern int fnic_tracing_enabled;  extern unsigned int trace_max_pages; +extern unsigned int fnic_fc_trace_max_pages; +extern int fnic_fc_tracing_enabled; +extern int fnic_fc_trace_cleared; +  typedef struct fnic_trace_dbg {  	int wr_idx;  	int rd_idx; @@ -56,6 +71,16 @@ struct fnic_trace_data {  typedef struct fnic_trace_data fnic_trace_data_t; +struct fc_trace_hdr { +	struct timespec time_stamp; +	u32 host_no; +	u8 frame_type; +	u8 frame_len; +} __attribute__((__packed__)); + +#define FC_TRACE_ADDRESS(a) \ +	((unsigned long)(a) + sizeof(struct fc_trace_hdr)) +  #define FNIC_TRACE_ENTRY_SIZE \  		  (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t)) @@ -84,7 +109,21 @@ fnic_trace_data_t *fnic_trace_get_buf(void);  int fnic_get_trace_data(fnic_dbgfs_t *);  int fnic_trace_buf_init(void);  void fnic_trace_free(void); +int fnic_debugfs_init(void); +void fnic_debugfs_terminate(void);  int fnic_trace_debugfs_init(void);  void fnic_trace_debugfs_terminate(void);
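Reading "First bit" in the FNIC_FC_RECV/FNIC_FC_SEND comment above as the top bit of the u8 (which is consistent with the frame_type & 0x7F mask applied in copy_and_format_trace_data()), the field packs a direction character and an Eth-vs-FC flag into one byte. A small illustrative check under that assumption:

#include <stdio.h>

#define FNIC_FC_RECV 0x52	/* Character R */
#define FNIC_FC_SEND 0x54	/* Character T */

int main(void)
{
	/* hypothetical received frame tagged as an Ethernet frame */
	unsigned char frame_type = FNIC_FC_RECV | 0x80;

	printf("eth frame: %s, direction '%c'\n",
	       (frame_type & 0x80) ? "yes" : "no", frame_type & 0x7f);
	return 0;
}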
+/* Fnic FC CTLR Trace related functions */ +int fnic_fc_trace_init(void); +void fnic_fc_trace_free(void); +int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, +				char *frame, u32 fc_frame_len); +int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag); +void copy_and_format_trace_data(struct fc_trace_hdr *tdata, +				fnic_dbgfs_t *fnic_dbgfs_prt, +				int *len, u8 rdata_flag); +int fnic_fc_trace_debugfs_init(void); +void fnic_fc_trace_debugfs_terminate(void); +  #endif diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c index 5cec6c60ca2..a1bc8ca958e 100644 --- a/drivers/scsi/g_NCR5380.c +++ b/drivers/scsi/g_NCR5380.c @@ -78,10 +78,6 @@   *        */ -/* - * $Log: generic_NCR5380.c,v $ - */ -  /* settings for DTC3181E card with only Mustek scanner attached */  #define USLEEP  #define USLEEP_POLL	1 @@ -461,7 +457,7 @@ int __init generic_NCR5380_detect(struct scsi_host_template * tpnt)  		if (instance->irq != SCSI_IRQ_NONE)  			if (request_irq(instance->irq, generic_NCR5380_intr, -					IRQF_DISABLED, "NCR5380", instance)) { +					0, "NCR5380", instance)) {  				printk(KERN_WARNING "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);  				instance->irq = SCSI_IRQ_NONE;  			} diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 1bcdb7beb77..703adf78e0b 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -25,10 +25,6 @@   * 1+ (800) 334-5454   */ -/* - * $Log: generic_NCR5380.h,v $ - */ -  #ifndef GENERIC_NCR5380_H  #define GENERIC_NCR5380_H @@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);  #define CAN_QUEUE 16  #endif -#ifndef HOSTS_C -  #define __STRVAL(x) #x  #define STRVAL(x) __STRVAL(x) @@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);  #define BOARD_NCR53C400A 2  #define BOARD_DTC3181E	3 -#endif /* else def HOSTS_C */  #endif /* ndef ASM */  #endif /* GENERIC_NCR5380_H */ diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 6d55b4e7e79..0f1ae13ce7c 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -594,8 +594,6 @@ static void gdth_pci_remove_one(struct pci_dev *pdev)  {  	gdth_ha_str *ha = pci_get_drvdata(pdev); -	pci_set_drvdata(pdev, NULL); -  	list_del(&ha->list);  	gdth_remove_one(ha); @@ -4686,6 +4684,7 @@ static struct scsi_host_template gdth_template = {          .cmd_per_lun            = GDTH_MAXC_P_L,          .unchecked_isa_dma      = 1,          .use_clustering         = ENABLE_CLUSTERING, +	.no_write_same		= 1,  };  #ifdef CONFIG_ISA @@ -4712,7 +4711,7 @@ static int __init gdth_isa_probe_one(u32 isa_bios)  	printk("Configuring GDT-ISA HA at BIOS 0x%05X IRQ %u DRQ %u\n",  		isa_bios, ha->irq, ha->drq); -	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha); +	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);  	if (error) {  		printk("GDT-ISA: Unable to allocate IRQ\n");  		goto out_host_put; @@ -4844,7 +4843,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)  	printk("Configuring GDT-EISA HA at Slot %d IRQ %u\n",  		eisa_slot >> 12, ha->irq); -	error = request_irq(ha->irq, gdth_interrupt, IRQF_DISABLED, "gdth", ha); +	error = request_irq(ha->irq, gdth_interrupt, 0, "gdth", ha);  	if (error) {  		printk("GDT-EISA: Unable to allocate IRQ\n");  		goto out_host_put; @@ -4980,7 +4979,7 @@ static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)  		ha->irq);  	error = request_irq(ha->irq, gdth_interrupt, -				IRQF_DISABLED|IRQF_SHARED, "gdth", ha); +				IRQF_SHARED, 
"gdth", ha);  	if (error) {  		printk("GDT-PCI: Unable to allocate IRQ\n");  		goto out_host_put; diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c index 2203ac28110..3b6f83ffddc 100644 --- a/drivers/scsi/gvp11.c +++ b/drivers/scsi/gvp11.c @@ -310,7 +310,7 @@ static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)  	if (!request_mem_region(address, 256, "wd33c93"))  		return -EBUSY; -	regs = (struct gvp11_scsiregs *)(ZTWO_VADDR(address)); +	regs = ZTWO_VADDR(address);  	error = check_wd33c93(regs);  	if (error) diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index df0c3c71ea4..3cbb57a8b84 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -169,6 +169,7 @@ void scsi_remove_host(struct Scsi_Host *shost)  	spin_unlock_irqrestore(shost->host_lock, flags);  	scsi_autopm_get_host(shost); +	flush_workqueue(shost->tmf_work_q);  	scsi_forget_host(shost);  	mutex_unlock(&shost->scan_mutex);  	scsi_proc_host_rm(shost); @@ -294,6 +295,8 @@ static void scsi_host_dev_release(struct device *dev)  	scsi_proc_hostdir_rm(shost->hostt); +	if (shost->tmf_work_q) +		destroy_workqueue(shost->tmf_work_q);  	if (shost->ehandler)  		kthread_stop(shost->ehandler);  	if (shost->work_q) @@ -316,6 +319,12 @@ static void scsi_host_dev_release(struct device *dev)  	kfree(shost);  } +static int shost_eh_deadline = -1; + +module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(eh_deadline, +		 "SCSI EH timeout in seconds (should be between 0 and 2^31-1)"); +  static struct device_type scsi_host_type = {  	.name =		"scsi_host",  	.release =	scsi_host_dev_release, @@ -354,7 +363,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)  	INIT_LIST_HEAD(&shost->eh_cmd_q);  	INIT_LIST_HEAD(&shost->starved_list);  	init_waitqueue_head(&shost->host_wait); -  	mutex_init(&shost->scan_mutex);  	/* @@ -388,6 +396,17 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)  	shost->unchecked_isa_dma = sht->unchecked_isa_dma;  	shost->use_clustering = sht->use_clustering;  	shost->ordered_tag = sht->ordered_tag; +	shost->no_write_same = sht->no_write_same; + +	if (shost_eh_deadline == -1 || !sht->eh_host_reset_handler) +		shost->eh_deadline = -1; +	else if ((ulong) shost_eh_deadline * HZ > INT_MAX) { +		shost_printk(KERN_WARNING, shost, +			     "eh_deadline %u too large, setting to %u\n", +			     shost_eh_deadline, INT_MAX / HZ); +		shost->eh_deadline = INT_MAX; +	} else +		shost->eh_deadline = shost_eh_deadline * HZ;  	if (sht->supported_mode == MODE_UNKNOWN)  		/* means we didn't set it ... default to INITIATOR */ @@ -436,9 +455,19 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)  		goto fail_kfree;  	} +	shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d", +					    WQ_UNBOUND | WQ_MEM_RECLAIM, +					   1, shost->host_no); +	if (!shost->tmf_work_q) { +		printk(KERN_WARNING "scsi%d: failed to create tmf workq\n", +		       shost->host_no); +		goto fail_kthread; +	}  	scsi_proc_hostdir_add(shost->hostt);  	return shost; + fail_kthread: +	kthread_stop(shost->ehandler);   fail_kfree:  	kfree(shost);  	return NULL; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 891c86b6625..31184b35370 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1,6 +1,6 @@  /*   *    Disk Array driver for HP Smart Array SAS controllers - *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. 
+ *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.   *   *    This program is free software; you can redistribute it and/or modify   *    it under the terms of the GNU General Public License as published by @@ -29,7 +29,6 @@  #include <linux/delay.h>  #include <linux/fs.h>  #include <linux/timer.h> -#include <linux/seq_file.h>  #include <linux/init.h>  #include <linux/spinlock.h>  #include <linux/compat.h> @@ -48,13 +47,14 @@  #include <linux/string.h>  #include <linux/bitmap.h>  #include <linux/atomic.h> -#include <linux/kthread.h>  #include <linux/jiffies.h> +#include <linux/percpu.h> +#include <asm/div64.h>  #include "hpsa_cmd.h"  #include "hpsa.h"  /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ -#define HPSA_DRIVER_VERSION "3.4.0-1" +#define HPSA_DRIVER_VERSION "3.4.4-1"  #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"  #define HPSA "hpsa" @@ -96,11 +96,9 @@ static const struct pci_device_id hpsa_pci_device_id[] = {  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353}, -	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334D},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356}, -	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923}, @@ -118,9 +116,20 @@ static const struct pci_device_id hpsa_pci_device_id[] = {  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, +	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},  	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,  		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},  	{0,} @@ -144,7 +153,6 @@ static struct board_type products[] = {  	{0x3351103C, "Smart Array P420", &SA5_access},  	{0x3352103C, "Smart Array P421", &SA5_access},  	{0x3353103C, "Smart Array P822", &SA5_access}, -	{0x334D103C, "Smart Array P822se", &SA5_access},  	{0x3354103C, "Smart Array P420i", &SA5_access},  	{0x3355103C, "Smart Array P220i", &SA5_access},  	{0x3356103C, "Smart Array P721m", &SA5_access}, @@ -164,22 +172,30 @@ static struct board_type products[] = {  	{0x21C3103C, "Smart Array", 
&SA5_access},  	{0x21C4103C, "Smart Array", &SA5_access},  	{0x21C5103C, "Smart Array", &SA5_access}, +	{0x21C6103C, "Smart Array", &SA5_access},  	{0x21C7103C, "Smart Array", &SA5_access},  	{0x21C8103C, "Smart Array", &SA5_access},  	{0x21C9103C, "Smart Array", &SA5_access}, +	{0x21CA103C, "Smart Array", &SA5_access}, +	{0x21CB103C, "Smart Array", &SA5_access}, +	{0x21CC103C, "Smart Array", &SA5_access}, +	{0x21CD103C, "Smart Array", &SA5_access}, +	{0x21CE103C, "Smart Array", &SA5_access}, +	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, +	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, +	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, +	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, +	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},  	{0xFFFF103C, "Unknown Smart Array", &SA5_access},  };  static int number_of_controllers; -static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list); -static spinlock_t lockup_detector_lock; -static struct task_struct *hpsa_lockup_detector; -  static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);  static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);  static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); -static void start_io(struct ctlr_info *h); +static void lock_and_start_io(struct ctlr_info *h); +static void start_io(struct ctlr_info *h, unsigned long *flags);  #ifdef CONFIG_COMPAT  static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); @@ -190,8 +206,9 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);  static struct CommandList *cmd_alloc(struct ctlr_info *h);  static struct CommandList *cmd_special_alloc(struct ctlr_info *h);  static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, -	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, +	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,  	int cmd_type); +#define VPD_PAGE (1 << 8)  static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);  static void hpsa_scan_start(struct Scsi_Host *); @@ -212,7 +229,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,  	struct CommandList *c);  /* performant mode helper functions */  static void calc_bucket_map(int *bucket, int num_buckets, -	int nsgs, int *bucket_map); +	int nsgs, int min_blocks, int *bucket_map);  static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);  static inline u32 next_command(struct ctlr_info *h, u8 q);  static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, @@ -224,8 +241,14 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);  static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,  				     int wait_for_ready);  static inline void finish_cmd(struct CommandList *c); +static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);  #define BOARD_NOT_READY 0  #define BOARD_READY 1 +static void hpsa_drain_accel_commands(struct ctlr_info *h); +static void hpsa_flush_cache(struct ctlr_info *h); +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 *scsi3addr);  static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)  { @@ -288,6 +311,55 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)  	return 1;  } +static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, +					 struct 
device_attribute *attr, +					 const char *buf, size_t count) +{ +	int status, len; +	struct ctlr_info *h; +	struct Scsi_Host *shost = class_to_shost(dev); +	char tmpbuf[10]; + +	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) +		return -EACCES; +	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; +	strncpy(tmpbuf, buf, len); +	tmpbuf[len] = '\0'; +	if (sscanf(tmpbuf, "%d", &status) != 1) +		return -EINVAL; +	h = shost_to_hba(shost); +	h->acciopath_status = !!status; +	dev_warn(&h->pdev->dev, +		"hpsa: HP SSD Smart Path %s via sysfs update.\n", +		h->acciopath_status ? "enabled" : "disabled"); +	return count; +} + +static ssize_t host_store_raid_offload_debug(struct device *dev, +					 struct device_attribute *attr, +					 const char *buf, size_t count) +{ +	int debug_level, len; +	struct ctlr_info *h; +	struct Scsi_Host *shost = class_to_shost(dev); +	char tmpbuf[10]; + +	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) +		return -EACCES; +	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; +	strncpy(tmpbuf, buf, len); +	tmpbuf[len] = '\0'; +	if (sscanf(tmpbuf, "%d", &debug_level) != 1) +		return -EINVAL; +	if (debug_level < 0) +		debug_level = 0; +	h = shost_to_hba(shost); +	h->raid_offload_debug = debug_level; +	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", +		h->raid_offload_debug); +	return count; +} +  static ssize_t host_store_rescan(struct device *dev,  				 struct device_attribute *attr,  				 const char *buf, size_t count) @@ -335,6 +407,17 @@ static ssize_t host_show_transport_mode(struct device *dev,  			"performant" : "simple");  } +static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, +	struct device_attribute *attr, char *buf) +{ +	struct ctlr_info *h; +	struct Scsi_Host *shost = class_to_shost(dev); + +	h = shost_to_hba(shost); +	return snprintf(buf, 30, "HP SSD Smart Path %s\n", +		(h->acciopath_status == 1) ?  
"enabled" : "disabled"); +} +  /* List of controllers which cannot be hard reset on kexec with reset_devices */  static u32 unresettable_controller[] = {  	0x324a103C, /* Smart Array P712m */ @@ -424,6 +507,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])  static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",  	"1(ADM)", "UNKNOWN"  }; +#define HPSA_RAID_0	0 +#define HPSA_RAID_4	1 +#define HPSA_RAID_1	2	/* also used for RAID 10 */ +#define HPSA_RAID_5	3	/* also used for RAID 50 */ +#define HPSA_RAID_51	4 +#define HPSA_RAID_6	5	/* also used for RAID 60 */ +#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */  #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)  static ssize_t raid_level_show(struct device *dev, @@ -512,10 +602,39 @@ static ssize_t unique_id_show(struct device *dev,  			sn[12], sn[13], sn[14], sn[15]);  } +static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, +	     struct device_attribute *attr, char *buf) +{ +	struct ctlr_info *h; +	struct scsi_device *sdev; +	struct hpsa_scsi_dev_t *hdev; +	unsigned long flags; +	int offload_enabled; + +	sdev = to_scsi_device(dev); +	h = sdev_to_hba(sdev); +	spin_lock_irqsave(&h->lock, flags); +	hdev = sdev->hostdata; +	if (!hdev) { +		spin_unlock_irqrestore(&h->lock, flags); +		return -ENODEV; +	} +	offload_enabled = hdev->offload_enabled; +	spin_unlock_irqrestore(&h->lock, flags); +	return snprintf(buf, 20, "%d\n", offload_enabled); +} +  static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);  static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);  static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);  static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, +			host_show_hp_ssd_smart_path_enabled, NULL); +static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, +		host_show_hp_ssd_smart_path_status, +		host_store_hp_ssd_smart_path_status); +static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, +			host_store_raid_offload_debug);  static DEVICE_ATTR(firmware_revision, S_IRUGO,  	host_show_firmware_revision, NULL);  static DEVICE_ATTR(commands_outstanding, S_IRUGO, @@ -529,6 +648,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {  	&dev_attr_raid_level,  	&dev_attr_lunid,  	&dev_attr_unique_id, +	&dev_attr_hp_ssd_smart_path_enabled,  	NULL,  }; @@ -538,6 +658,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {  	&dev_attr_commands_outstanding,  	&dev_attr_transport_mode,  	&dev_attr_resettable, +	&dev_attr_hp_ssd_smart_path_status, +	&dev_attr_raid_offload_debug,  	NULL,  }; @@ -562,6 +684,7 @@ static struct scsi_host_template hpsa_driver_template = {  	.sdev_attrs = hpsa_sdev_attrs,  	.shost_attrs = hpsa_shost_attrs,  	.max_sectors = 8192, +	.no_write_same = 1,  }; @@ -574,9 +697,12 @@ static inline void addQ(struct list_head *list, struct CommandList *c)  static inline u32 next_command(struct ctlr_info *h, u8 q)  {  	u32 a; -	struct reply_pool *rq = &h->reply_queue[q]; +	struct reply_queue_buffer *rq = &h->reply_queue[q];  	unsigned long flags; +	if (h->transMethod & CFGTBL_Trans_io_accel1) +		return h->access.command_completed(h, q); +  	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))  		return h->access.command_completed(h, q); @@ -597,6 +723,32 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)  	return a;  } +/* + * There are some special bits in the bus address of the + * command that we have to set for the controller to know + * how to 
process the command: + * + * Normal performant mode: + * bit 0: 1 means performant mode, 0 means simple mode. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 0) + * + * ioaccel1 mode: + * bit 0 = "performant mode" bit. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 110) + * (command type is needed because ioaccel1 mode + * commands are submitted through the same register as normal + * mode commands, so this is how the controller knows whether + * the command is normal mode or ioaccel1 mode.) + * + * ioaccel2 mode: + * bit 0 = "performant mode" bit. + * bits 1-4 = block fetch table entry (note extra bit) + * bits 4-6 = not needed, because ioaccel2 mode has + * a separate special register for submitting commands. + */ +  /* set_performant_mode: Modify the tag for cciss performant   * set bit 0 for pull model, bits 3-1 for block fetch   * register number @@ -605,12 +757,47 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)  {  	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {  		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); -		if (likely(h->msix_vector)) +		if (likely(h->msix_vector > 0))  			c->Header.ReplyQueue =  				raw_smp_processor_id() % h->nreply_queues;  	}  } +static void set_ioaccel1_performant_mode(struct ctlr_info *h, +						struct CommandList *c) +{ +	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + +	/* Tell the controller to post the reply to the queue for this +	 * processor.  This seems to give the best I/O throughput. +	 */ +	cp->ReplyQueue = smp_processor_id() % h->nreply_queues; +	/* Set the bits in the address sent down to include: +	 *  - performant mode bit (bit 0) +	 *  - pull count (bits 1-3) +	 *  - command type (bits 4-6) +	 */ +	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | +					IOACCEL1_BUSADDR_CMDTYPE; +} + +static void set_ioaccel2_performant_mode(struct ctlr_info *h, +						struct CommandList *c) +{ +	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + +	/* Tell the controller to post the reply to the queue for this +	 * processor.  This seems to give the best I/O throughput. +	 */ +	cp->reply_queue = smp_processor_id() % h->nreply_queues; +	/* Set the bits in the address sent down to include: +	 *  - performant mode bit not used in ioaccel mode 2 +	 *  - pull count (bits 0-3) +	 *  - command type isn't needed for ioaccel2 +	 */ +	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); +} +  static int is_firmware_flash_cmd(u8 *cdb)  {  	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; @@ -645,13 +832,22 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,  {  	unsigned long flags; -	set_performant_mode(h, c); +	switch (c->cmd_type) { +	case CMD_IOACCEL1: +		set_ioaccel1_performant_mode(h, c); +		break; +	case CMD_IOACCEL2: +		set_ioaccel2_performant_mode(h, c); +		break; +	default: +		set_performant_mode(h, c); +	}  	dial_down_lockup_detection_during_fw_flash(h, c);  	spin_lock_irqsave(&h->lock, flags);  	addQ(&h->reqQ, c);  	h->Qdepth++; +	start_io(h, &flags);  	spin_unlock_irqrestore(&h->lock, flags); -	start_io(h);  }  static inline void removeQ(struct CommandList *c) @@ -789,6 +985,14 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,  	/* Raid level changed. */  	h->dev[entry]->raid_level = new_entry->raid_level; + +	/* Raid offload parameters changed. 
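+	 * (offload_config, offload_enabled, the ioaccel handle, the
+	 * offload_to_mirror cursor and the raid map are refreshed together
+	 * below; they are only meaningful as a consistent set.)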
*/ +	h->dev[entry]->offload_config = new_entry->offload_config; +	h->dev[entry]->offload_enabled = new_entry->offload_enabled; +	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; +	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; +	h->dev[entry]->raid_map = new_entry->raid_map; +  	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",  		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,  		new_entry->target, new_entry->lun); @@ -909,6 +1113,10 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,  	 */  	if (dev1->raid_level != dev2->raid_level)  		return 1; +	if (dev1->offload_config != dev2->offload_config) +		return 1; +	if (dev1->offload_enabled != dev2->offload_enabled) +		return 1;  	return 0;  } @@ -939,6 +1147,9 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,  					return DEVICE_UPDATED;  				return DEVICE_SAME;  			} else { +				/* Keep offline devices offline */ +				if (needle->volume_offline) +					return DEVICE_NOT_FOUND;  				return DEVICE_CHANGED;  			}  		} @@ -947,6 +1158,110 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,  	return DEVICE_NOT_FOUND;  } +static void hpsa_monitor_offline_device(struct ctlr_info *h, +					unsigned char scsi3addr[]) +{ +	struct offline_device_entry *device; +	unsigned long flags; + +	/* Check to see if device is already on the list */ +	spin_lock_irqsave(&h->offline_device_lock, flags); +	list_for_each_entry(device, &h->offline_device_list, offline_list) { +		if (memcmp(device->scsi3addr, scsi3addr, +			sizeof(device->scsi3addr)) == 0) { +			spin_unlock_irqrestore(&h->offline_device_lock, flags); +			return; +		} +	} +	spin_unlock_irqrestore(&h->offline_device_lock, flags); + +	/* Device is not on the list, add it. 
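+	 * Note that offline_device_lock is dropped across the allocation
+	 * below, so two racing rescans could both add the same address;
+	 * at worst the same volume is then polled more than once.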
*/ +	device = kmalloc(sizeof(*device), GFP_KERNEL); +	if (!device) { +		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); +		return; +	} +	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); +	spin_lock_irqsave(&h->offline_device_lock, flags); +	list_add_tail(&device->offline_list, &h->offline_device_list); +	spin_unlock_irqrestore(&h->offline_device_lock, flags); +} + +/* Print a message explaining various offline volume states */ +static void hpsa_show_volume_status(struct ctlr_info *h, +	struct hpsa_scsi_dev_t *sd) +{ +	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +	switch (sd->volume_offline) { +	case HPSA_LV_OK: +		break; +	case HPSA_LV_UNDERGOING_ERASE: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_UNDERGOING_RPI: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PENDING_RPI: +		dev_info(&h->pdev->dev, +				"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", +				h->scsi_host->host_no, +				sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_ENCRYPTED_NO_KEY: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_UNDERGOING_ENCRYPTION: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PENDING_ENCRYPTION: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PENDING_ENCRYPTION_REKEYING: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	} +} +  static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,  	struct hpsa_scsi_dev_t *sd[], int nsds)  { @@ -1011,6 +1326,20 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,  	for (i = 0; i < nsds; i++) {  		if (!sd[i]) /* if already added above. 
*/  			continue; + +		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS +		 * as the SCSI mid-layer does not handle such devices well. +		 * It relentlessly loops sending TUR at 3Hz, then READ(10) +		 * at 160Hz, and prevents the system from coming up. +		 */ +		if (sd[i]->volume_offline) { +			hpsa_show_volume_status(h, sd[i]); +			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", +				h->scsi_host->host_no, +				sd[i]->bus, sd[i]->target, sd[i]->lun); +			continue; +		} +  		device_change = hpsa_scsi_find_entry(sd[i], h->dev,  					h->ndevices, &entry);  		if (device_change == DEVICE_NOT_FOUND) { @@ -1029,6 +1358,17 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,  	}  	spin_unlock_irqrestore(&h->devlock, flags); +	/* Monitor devices which are in one of several NOT READY states to be +	 * brought online later. This must be done without holding h->devlock, +	 * so don't touch h->dev[] +	 */ +	for (i = 0; i < nsds; i++) { +		if (!sd[i]) /* if already added above. */ +			continue; +		if (sd[i]->volume_offline) +			hpsa_monitor_offline_device(h, sd[i]->scsi3addr); +	} +  	/* Don't notify scsi mid layer of any changes the first time through  	 * (or if there are no changes) scsi_scan_host will do it later the  	 * first time through. @@ -1194,11 +1534,156 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,  	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);  } + +/* Decode the various types of errors on ioaccel2 path. + * Return 1 for any error that should generate a RAID path retry. + * Return 0 for errors that don't require a RAID path retry. + */ +static int handle_ioaccel_mode2_error(struct ctlr_info *h, +					struct CommandList *c, +					struct scsi_cmnd *cmd, +					struct io_accel2_cmd *c2) +{ +	int data_len; +	int retry = 0; + +	switch (c2->error_data.serv_response) { +	case IOACCEL2_SERV_RESPONSE_COMPLETE: +		switch (c2->error_data.status) { +		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: +			dev_warn(&h->pdev->dev, +				"%s: task complete with check condition.\n", +				"HP SSD Smart Path"); +			cmd->result |= SAM_STAT_CHECK_CONDITION; +			if (c2->error_data.data_present != +					IOACCEL2_SENSE_DATA_PRESENT) { +				memset(cmd->sense_buffer, 0, +					SCSI_SENSE_BUFFERSIZE); +				break; +			} +			/* copy the sense data */ +			data_len = c2->error_data.sense_data_len; +			if (data_len > SCSI_SENSE_BUFFERSIZE) +				data_len = SCSI_SENSE_BUFFERSIZE; +			if (data_len > sizeof(c2->error_data.sense_data_buff)) +				data_len = +					sizeof(c2->error_data.sense_data_buff); +			memcpy(cmd->sense_buffer, +				c2->error_data.sense_data_buff, data_len); +			retry = 1; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: +			dev_warn(&h->pdev->dev, +				"%s: task complete with BUSY status.\n", +				"HP SSD Smart Path"); +			retry = 1; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: +			dev_warn(&h->pdev->dev, +				"%s: task complete with reservation conflict.\n", +				"HP SSD Smart Path"); +			retry = 1; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: +			/* Make scsi midlayer do unlimited retries */ +			cmd->result = DID_IMM_RETRY << 16; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: +			dev_warn(&h->pdev->dev, +				"%s: task complete with aborted status.\n", +				"HP SSD Smart Path"); +			retry = 1; +			break; +		default: +			dev_warn(&h->pdev->dev, +				"%s: task complete with unrecognized status: 0x%02x\n", +				"HP SSD Smart Path", 
c2->error_data.status); +			retry = 1; +			break; +		} +		break; +	case IOACCEL2_SERV_RESPONSE_FAILURE: +		/* don't expect to get here. */ +		dev_warn(&h->pdev->dev, +			"unexpected delivery or target failure, status = 0x%02x\n", +			c2->error_data.status); +		retry = 1; +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: +		dev_warn(&h->pdev->dev, "task management function rejected.\n"); +		retry = 1; +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: +		dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); +		break; +	default: +		dev_warn(&h->pdev->dev, +			"%s: Unrecognized server response: 0x%02x\n", +			"HP SSD Smart Path", +			c2->error_data.serv_response); +		retry = 1; +		break; +	} + +	return retry;	/* retry on raid path? */ +} + +static void process_ioaccel2_completion(struct ctlr_info *h, +		struct CommandList *c, struct scsi_cmnd *cmd, +		struct hpsa_scsi_dev_t *dev) +{ +	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; +	int raid_retry = 0; + +	/* check for good status */ +	if (likely(c2->error_data.serv_response == 0 && +			c2->error_data.status == 0)) { +		cmd_free(h, c); +		cmd->scsi_done(cmd); +		return; +	} + +	/* Any RAID offload error results in retry which will use +	 * the normal I/O path so the controller can handle whatever's +	 * wrong. +	 */ +	if (is_logical_dev_addr_mode(dev->scsi3addr) && +		c2->error_data.serv_response == +			IOACCEL2_SERV_RESPONSE_FAILURE) { +		dev->offload_enabled = 0; +		h->drv_req_rescan = 1;	/* schedule controller for a rescan */ +		cmd->result = DID_SOFT_ERROR << 16; +		cmd_free(h, c); +		cmd->scsi_done(cmd); +		return; +	} +	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2); +	/* If error found, disable Smart Path, schedule a rescan, +	 * and force a retry on the standard path. +	 */ +	if (raid_retry) { +		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n", +			"HP SSD Smart Path"); +		dev->offload_enabled = 0; /* Disable Smart Path */ +		h->drv_req_rescan = 1;	  /* schedule controller rescan */ +		cmd->result = DID_SOFT_ERROR << 16; +	} +	cmd_free(h, c); +	cmd->scsi_done(cmd); +} +  static void complete_scsi_command(struct CommandList *cp)  {  	struct scsi_cmnd *cmd;  	struct ctlr_info *h;  	struct ErrorInfo *ei; +	struct hpsa_scsi_dev_t *dev;  	unsigned char sense_key;  	unsigned char asc;      /* additional sense code */ @@ -1208,13 +1693,19 @@ static void complete_scsi_command(struct CommandList *cp)  	ei = cp->err_info;  	cmd = (struct scsi_cmnd *) cp->scsi_cmd;  	h = cp->h; +	dev = cmd->device->hostdata;  	scsi_dma_unmap(cmd); /* undo the DMA mappings */ -	if (cp->Header.SGTotal > h->max_cmd_sg_entries) +	if ((cp->cmd_type == CMD_SCSI) && +		(cp->Header.SGTotal > h->max_cmd_sg_entries))  		hpsa_unmap_sg_chain_block(h, cp);  	cmd->result = (DID_OK << 16); 		/* host byte */  	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */ + +	if (cp->cmd_type == CMD_IOACCEL2) +		return process_ioaccel2_completion(h, cp, cmd, dev); +  	cmd->result |= ei->ScsiStatus;  	/* copy the sense data whether we need to or not. */ @@ -1234,6 +1725,32 @@ static void complete_scsi_command(struct CommandList *cp)  		return;  	} +	/* For I/O accelerator commands, copy over some fields to the normal +	 * CISS header used below for error handling. 
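+	 * (The io_accel1_cmd layout differs from the CISS CommandList, so
+	 * the SG counts, CDB length, tag, LUN and CDB bytes are copied back
+	 * into cp before falling into the common CommandStatus switch.)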
+	 */ +	if (cp->cmd_type == CMD_IOACCEL1) { +		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; +		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); +		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; +		cp->Header.Tag.lower = c->Tag.lower; +		cp->Header.Tag.upper = c->Tag.upper; +		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); +		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); + +		/* Any RAID offload error results in retry which will use +		 * the normal I/O path so the controller can handle whatever's +		 * wrong. +		 */ +		if (is_logical_dev_addr_mode(dev->scsi3addr)) { +			if (ei->CommandStatus == CMD_IOACCEL_DISABLED) +				dev->offload_enabled = 0; +			cmd->result = DID_SOFT_ERROR << 16; +			cmd_free(h, cp); +			cmd->scsi_done(cmd); +			return; +		} +	} +  	/* an error has occurred */  	switch (ei->CommandStatus) { @@ -1248,10 +1765,8 @@ static void complete_scsi_command(struct CommandList *cp)  		}  		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { -			if (check_for_unit_attention(h, cp)) { -				cmd->result = DID_SOFT_ERROR << 16; +			if (check_for_unit_attention(h, cp))  				break; -			}  			if (sense_key == ILLEGAL_REQUEST) {  				/*  				 * SCSI REPORT_LUNS is commonly unsupported on @@ -1289,7 +1804,7 @@ static void complete_scsi_command(struct CommandList *cp)  					"has check condition: aborted command: "  					"ASC: 0x%x, ASCQ: 0x%x\n",  					cp, asc, ascq); -				cmd->result = DID_SOFT_ERROR << 16; +				cmd->result |= DID_SOFT_ERROR << 16;  				break;  			}  			/* Must be some other type of check condition */ @@ -1398,6 +1913,14 @@ static void complete_scsi_command(struct CommandList *cp)  		cmd->result = DID_ERROR << 16;  		dev_warn(&h->pdev->dev, "Command unabortable\n");  		break; +	case CMD_IOACCEL_DISABLED: +		/* This only handles the direct pass-through case since RAID +		 * offload is handled above.  Just attempt a retry. +		 */ +		cmd->result = DID_SOFT_ERROR << 16; +		dev_warn(&h->pdev->dev, +				"cp %p had HP SSD Smart Path error\n", cp); +		break;  	default:  		cmd->result = DID_ERROR << 16;  		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", @@ -1447,6 +1970,7 @@ static int hpsa_map_one(struct pci_dev *pdev,  	cp->SG[0].Addr.upper =  	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);  	cp->SG[0].Len = buflen; +	cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */  	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */  	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */  	return 0; @@ -1462,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,  	wait_for_completion(&wait);  } +static u32 lockup_detected(struct ctlr_info *h) +{ +	int cpu; +	u32 rc, *lockup_detected; + +	cpu = get_cpu(); +	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); +	rc = *lockup_detected; +	put_cpu(); +	return rc; +} +  static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,  	struct CommandList *c)  { -	unsigned long flags; -  	/* If controller lockup detected, fake a hardware error. 
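+	 * lockup_detected() is a lockless read of a per-cpu flag, so the
+	 * h->lock round trip the old code paid just to test the flag is
+	 * no longer needed.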
*/ -	spin_lock_irqsave(&h->lock, flags); -	if (unlikely(h->lockup_detected)) { -		spin_unlock_irqrestore(&h->lock, flags); +	if (unlikely(lockup_detected(h)))  		c->err_info->CommandStatus = CMD_HARDWARE_ERR; -	} else { -		spin_unlock_irqrestore(&h->lock, flags); +	else  		hpsa_scsi_do_simple_cmd_core(h, c); -	}  }  #define MAX_DRIVER_CMD_RETRIES 25 @@ -1499,17 +2029,37 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,  	hpsa_pci_unmap(h->pdev, c, 1, data_direction);  } -static void hpsa_scsi_interpret_error(struct CommandList *cp) +static void hpsa_print_cmd(struct ctlr_info *h, char *txt, +				struct CommandList *c)  { -	struct ErrorInfo *ei; +	const u8 *cdb = c->Request.CDB; +	const u8 *lun = c->Header.LUN.LunAddrBytes; + +	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" +	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", +		txt, lun[0], lun[1], lun[2], lun[3], +		lun[4], lun[5], lun[6], lun[7], +		cdb[0], cdb[1], cdb[2], cdb[3], +		cdb[4], cdb[5], cdb[6], cdb[7], +		cdb[8], cdb[9], cdb[10], cdb[11], +		cdb[12], cdb[13], cdb[14], cdb[15]); +} + +static void hpsa_scsi_interpret_error(struct ctlr_info *h, +			struct CommandList *cp) +{ +	const struct ErrorInfo *ei = cp->err_info;  	struct device *d = &cp->h->pdev->dev; +	const u8 *sd = ei->SenseInfo; -	ei = cp->err_info;  	switch (ei->CommandStatus) {  	case CMD_TARGET_STATUS: -		dev_warn(d, "cmd %p has completed with errors\n", cp); -		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, -				ei->ScsiStatus); +		hpsa_print_cmd(h, "SCSI status", cp); +		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) +			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", +				sd[2] & 0x0f, sd[12], sd[13]); +		else +			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);  		if (ei->ScsiStatus == 0)  			dev_warn(d, "SCSI status is abnormally zero.  "  			"(probably indicates selection timeout " @@ -1517,54 +2067,51 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)  			"firmware bug, circa July, 2001.)\n");  		break;  	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ -			dev_info(d, "UNDERRUN\n");  		break;  	case CMD_DATA_OVERRUN: -		dev_warn(d, "cp %p has completed with data overrun\n", cp); +		hpsa_print_cmd(h, "overrun condition", cp);  		break;  	case CMD_INVALID: {  		/* controller unfortunately reports SCSI passthru's  		 * to non-existent targets as invalid commands.  		 
*/
-		dev_warn(d, "cp %p is reported invalid (probably means "
-			"target device no longer present)\n", cp);
-		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
-		print_cmd(cp);  */
+		hpsa_print_cmd(h, "invalid command", cp);
+		dev_warn(d, "probably means device no longer present\n");
 		}
 		break;
 	case CMD_PROTOCOL_ERR:
-		dev_warn(d, "cp %p has protocol error \n", cp);
+		hpsa_print_cmd(h, "protocol error", cp);
 		break;
 	case CMD_HARDWARE_ERR:
-		/* cmd->result = DID_ERROR << 16; */
-		dev_warn(d, "cp %p had hardware error\n", cp);
+		hpsa_print_cmd(h, "hardware error", cp);
 		break;
 	case CMD_CONNECTION_LOST:
-		dev_warn(d, "cp %p had connection lost\n", cp);
+		hpsa_print_cmd(h, "connection lost", cp);
 		break;
 	case CMD_ABORTED:
-		dev_warn(d, "cp %p was aborted\n", cp);
+		hpsa_print_cmd(h, "aborted", cp);
 		break;
 	case CMD_ABORT_FAILED:
-		dev_warn(d, "cp %p reports abort failed\n", cp);
+		hpsa_print_cmd(h, "abort failed", cp);
 		break;
 	case CMD_UNSOLICITED_ABORT:
-		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
+		hpsa_print_cmd(h, "unsolicited abort", cp);
 		break;
 	case CMD_TIMEOUT:
-		dev_warn(d, "cp %p timed out\n", cp);
+		hpsa_print_cmd(h, "timed out", cp);
 		break;
 	case CMD_UNABORTABLE:
-		dev_warn(d, "Command unabortable\n");
+		hpsa_print_cmd(h, "unabortable", cp);
 		break;
 	default:
-		dev_warn(d, "cp %p returned unknown status %x\n", cp,
+		hpsa_print_cmd(h, "unknown status", cp);
+		dev_warn(d, "Unknown command status %x\n",
 				ei->CommandStatus);
 	}
 }
 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
-			unsigned char page, unsigned char *buf,
+			u16 page, unsigned char *buf,
 			unsigned char bufsize)
 {
 	int rc = IO_OK;
@@ -1586,7 +2133,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
 	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
 	ei = c->err_info;
 	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 	}
 out:
@@ -1594,7 +2141,39 @@ out:
 	return rc;
 }
-static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
+static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
+		unsigned char *scsi3addr, unsigned char page,
+		struct bmic_controller_parameters *buf, size_t bufsize)
+{
+	int rc = IO_OK;
+	struct CommandList *c;
+	struct ErrorInfo *ei;
+
+	c = cmd_special_alloc(h);
+
+	if (c == NULL) {			/* trouble... */
+		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
+		return -ENOMEM;
+	}
+
+	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
+			page, scsi3addr, TYPE_CMD)) {
+		rc = -1;
+		goto out;
+	}
+	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
+	ei = c->err_info;
+	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		hpsa_scsi_interpret_error(h, c);
+		rc = -1;
+	}
+out:
+	cmd_special_free(h, c);
+	return rc;
+}
+
+static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+	u8 reset_type)
 {
 	int rc = IO_OK;
 	struct CommandList *c;
@@ -1608,14 +2187,15 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
 	}
 	/* fill_cmd can't fail here, no data buffer to map.
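+	 * fill_cmd() always builds a LUN reset; reset_type is patched into
+	 * CDB[1] just below so callers can ask for a different reset scope.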
*/
-	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
-			NULL, 0, 0, scsi3addr, TYPE_MSG);
+	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+			scsi3addr, TYPE_MSG);
+	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
 	hpsa_scsi_do_simple_cmd_core(h, c);
 	/* no unmap needed here because no data xfer. */
 	ei = c->err_info;
 	if (ei->CommandStatus != 0) {
-		hpsa_scsi_interpret_error(c);
+		hpsa_scsi_interpret_error(h, c);
 		rc = -1;
 	}
 	cmd_special_free(h, c);
@@ -1632,7 +2212,7 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
 	buf = kzalloc(64, GFP_KERNEL);
 	if (!buf)
 		return;
-	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
+	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
 	if (rc == 0)
 		*raid_level = buf[8];
 	if (*raid_level > RAID_UNKNOWN)
@@ -1641,6 +2221,204 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
 	return;
 }
+#define HPSA_MAP_DEBUG
+#ifdef HPSA_MAP_DEBUG
+static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
+				struct raid_map_data *map_buff)
+{
+	struct raid_map_disk_data *dd = &map_buff->data[0];
+	int map, row, col;
+	u16 map_cnt, row_cnt, disks_per_row;
+
+	if (rc != 0)
+		return;
+
+	/* Show details only if debugging has been activated. */
+	if (h->raid_offload_debug < 2)
+		return;
+
+	dev_info(&h->pdev->dev, "structure_size = %u\n",
+				le32_to_cpu(map_buff->structure_size));
+	dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
+			le32_to_cpu(map_buff->volume_blk_size));
+	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
+			le64_to_cpu(map_buff->volume_blk_cnt));
+	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
+			map_buff->phys_blk_shift);
+	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
+			map_buff->parity_rotation_shift);
+	dev_info(&h->pdev->dev, "strip_size = %u\n",
+			le16_to_cpu(map_buff->strip_size));
+	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
+			le64_to_cpu(map_buff->disk_starting_blk));
+	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
+			le64_to_cpu(map_buff->disk_blk_cnt));
+	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
+			le16_to_cpu(map_buff->data_disks_per_row));
+	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
+			le16_to_cpu(map_buff->metadata_disks_per_row));
+	dev_info(&h->pdev->dev, "row_cnt = %u\n",
+			le16_to_cpu(map_buff->row_cnt));
+	dev_info(&h->pdev->dev, "layout_map_count = %u\n",
+			le16_to_cpu(map_buff->layout_map_count));
+	dev_info(&h->pdev->dev, "flags = %u\n",
+			le16_to_cpu(map_buff->flags));
+	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
+		dev_info(&h->pdev->dev, "encryption = ON\n");
+	else
+		dev_info(&h->pdev->dev, "encryption = OFF\n");
+	dev_info(&h->pdev->dev, "dekindex = %u\n",
+			le16_to_cpu(map_buff->dekindex));
+
+	map_cnt = le16_to_cpu(map_buff->layout_map_count);
+	for (map = 0; map < map_cnt; map++) {
+		dev_info(&h->pdev->dev, "Map%u:\n", map);
+		row_cnt = le16_to_cpu(map_buff->row_cnt);
+		for (row = 0; row < row_cnt; row++) {
+			dev_info(&h->pdev->dev, "  Row%u:\n", row);
+			disks_per_row =
+				le16_to_cpu(map_buff->data_disks_per_row);
+			for (col = 0; col < disks_per_row; col++, dd++)
+				dev_info(&h->pdev->dev,
+					"    D%02u: h=0x%04x xor=%u,%u\n",
+					col, dd->ioaccel_handle,
+					dd->xor_mult[0], dd->xor_mult[1]);
+			disks_per_row =
+				le16_to_cpu(map_buff->metadata_disks_per_row);
+			for (col = 0; col < disks_per_row; col++, dd++)
+				dev_info(&h->pdev->dev,
+					"    M%02u: h=0x%04x xor=%u,%u\n",
+					col, dd->ioaccel_handle,
+				
	dd->xor_mult[0], dd->xor_mult[1]); +		} +	} +} +#else +static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, +			__attribute__((unused)) int rc, +			__attribute__((unused)) struct raid_map_data *map_buff) +{ +} +#endif + +static int hpsa_get_raid_map(struct ctlr_info *h, +	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ +	int rc = 0; +	struct CommandList *c; +	struct ErrorInfo *ei; + +	c = cmd_special_alloc(h); +	if (c == NULL) { +		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); +		return -ENOMEM; +	} +	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, +			sizeof(this_device->raid_map), 0, +			scsi3addr, TYPE_CMD)) { +		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); +		cmd_special_free(h, c); +		return -ENOMEM; +	} +	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); +	ei = c->err_info; +	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { +		hpsa_scsi_interpret_error(h, c); +		cmd_special_free(h, c); +		return -1; +	} +	cmd_special_free(h, c); + +	/* @todo in the future, dynamically allocate RAID map memory */ +	if (le32_to_cpu(this_device->raid_map.structure_size) > +				sizeof(this_device->raid_map)) { +		dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); +		rc = -1; +	} +	hpsa_debug_map_buff(h, rc, &this_device->raid_map); +	return rc; +} + +static int hpsa_vpd_page_supported(struct ctlr_info *h, +	unsigned char scsi3addr[], u8 page) +{ +	int rc; +	int i; +	int pages; +	unsigned char *buf, bufsize; + +	buf = kzalloc(256, GFP_KERNEL); +	if (!buf) +		return 0; + +	/* Get the size of the page list first */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, +				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, +				buf, HPSA_VPD_HEADER_SZ); +	if (rc != 0) +		goto exit_unsupported; +	pages = buf[3]; +	if ((pages + HPSA_VPD_HEADER_SZ) <= 255) +		bufsize = pages + HPSA_VPD_HEADER_SZ; +	else +		bufsize = 255; + +	/* Get the whole VPD page list */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, +				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, +				buf, bufsize); +	if (rc != 0) +		goto exit_unsupported; + +	pages = buf[3]; +	for (i = 1; i <= pages; i++) +		if (buf[3 + i] == page) +			goto exit_supported; +exit_unsupported: +	kfree(buf); +	return 0; +exit_supported: +	kfree(buf); +	return 1; +} + +static void hpsa_get_ioaccel_status(struct ctlr_info *h, +	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ +	int rc; +	unsigned char *buf; +	u8 ioaccel_status; + +	this_device->offload_config = 0; +	this_device->offload_enabled = 0; + +	buf = kzalloc(64, GFP_KERNEL); +	if (!buf) +		return; +	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) +		goto out; +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, +			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); +	if (rc != 0) +		goto out; + +#define IOACCEL_STATUS_BYTE 4 +#define OFFLOAD_CONFIGURED_BIT 0x01 +#define OFFLOAD_ENABLED_BIT 0x02 +	ioaccel_status = buf[IOACCEL_STATUS_BYTE]; +	this_device->offload_config = +		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); +	if (this_device->offload_config) { +		this_device->offload_enabled = +			!!(ioaccel_status & OFFLOAD_ENABLED_BIT); +		if (hpsa_get_raid_map(h, scsi3addr, this_device)) +			this_device->offload_enabled = 0; +	} +out: +	kfree(buf); +	return; +} +  /* Get the device id from inquiry page 0x83 */  static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,  	unsigned char *device_id, int buflen) @@ -1652,8 +2430,8 @@ static int hpsa_get_device_id(struct ctlr_info 
*h, unsigned char *scsi3addr,  		buflen = 16;  	buf = kzalloc(64, GFP_KERNEL);  	if (!buf) -		return -1; -	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); +		return -ENOMEM; +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);  	if (rc == 0)  		memcpy(device_id, &buf[8], buflen);  	kfree(buf); @@ -1687,8 +2465,16 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,  	ei = c->err_info;  	if (ei->CommandStatus != 0 &&  	    ei->CommandStatus != CMD_DATA_UNDERRUN) { -		hpsa_scsi_interpret_error(c); +		hpsa_scsi_interpret_error(h, c);  		rc = -1; +	} else { +		if (buf->extended_response_flag != extended_response) { +			dev_err(&h->pdev->dev, +				"report luns requested format %u, got %u\n", +				extended_response, +				buf->extended_response_flag); +			rc = -1; +		}  	}  out:  	cmd_special_free(h, c); @@ -1716,6 +2502,111 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,  	device->lun = lun;  } +/* Use VPD inquiry to get details of volume status */ +static int hpsa_get_volume_status(struct ctlr_info *h, +					unsigned char scsi3addr[]) +{ +	int rc; +	int status; +	int size; +	unsigned char *buf; + +	buf = kzalloc(64, GFP_KERNEL); +	if (!buf) +		return HPSA_VPD_LV_STATUS_UNSUPPORTED; + +	/* Does controller have VPD for logical volume status? */ +	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) +		goto exit_failed; + +	/* Get the size of the VPD return buffer */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, +					buf, HPSA_VPD_HEADER_SZ); +	if (rc != 0) +		goto exit_failed; +	size = buf[3]; + +	/* Now get the whole VPD buffer */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, +					buf, size + HPSA_VPD_HEADER_SZ); +	if (rc != 0) +		goto exit_failed; +	status = buf[4]; /* status byte */ + +	kfree(buf); +	return status; +exit_failed: +	kfree(buf); +	return HPSA_VPD_LV_STATUS_UNSUPPORTED; +} + +/* Determine offline status of a volume. + * Return either: + *  0 (not offline) + *  0xff (offline for unknown reasons) + *  # (integer code indicating one of several NOT READY states + *     describing why a volume is to be kept offline) + */ +static int hpsa_volume_offline(struct ctlr_info *h, +					unsigned char scsi3addr[]) +{ +	struct CommandList *c; +	unsigned char *sense, sense_key, asc, ascq; +	int ldstat = 0; +	u16 cmd_status; +	u8 scsi_status; +#define ASC_LUN_NOT_READY 0x04 +#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 +#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 + +	c = cmd_alloc(h); +	if (!c) +		return 0; +	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); +	hpsa_scsi_do_simple_cmd_core(h, c); +	sense = c->err_info->SenseInfo; +	sense_key = sense[2]; +	asc = sense[12]; +	ascq = sense[13]; +	cmd_status = c->err_info->CommandStatus; +	scsi_status = c->err_info->ScsiStatus; +	cmd_free(h, c); +	/* Is the volume 'not ready'? 
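+	 * Only CHECK CONDITION with sense key NOT READY and ASC 0x04 leads
+	 * to the VPD status lookup below; any other TUR outcome means the
+	 * volume is treated as online.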
*/ +	if (cmd_status != CMD_TARGET_STATUS || +		scsi_status != SAM_STAT_CHECK_CONDITION || +		sense_key != NOT_READY || +		asc != ASC_LUN_NOT_READY)  { +		return 0; +	} + +	/* Determine the reason for not ready state */ +	ldstat = hpsa_get_volume_status(h, scsi3addr); + +	/* Keep volume offline in certain cases: */ +	switch (ldstat) { +	case HPSA_LV_UNDERGOING_ERASE: +	case HPSA_LV_UNDERGOING_RPI: +	case HPSA_LV_PENDING_RPI: +	case HPSA_LV_ENCRYPTED_NO_KEY: +	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: +	case HPSA_LV_UNDERGOING_ENCRYPTION: +	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: +	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: +		return ldstat; +	case HPSA_VPD_LV_STATUS_UNSUPPORTED: +		/* If VPD status page isn't available, +		 * use ASC/ASCQ to determine state +		 */ +		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || +			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) +			return ldstat; +		break; +	default: +		break; +	} +	return 0; +} +  static int hpsa_update_device_info(struct ctlr_info *h,  	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,  	unsigned char *is_OBDR_device) @@ -1754,10 +2645,22 @@ static int hpsa_update_device_info(struct ctlr_info *h,  		sizeof(this_device->device_id));  	if (this_device->devtype == TYPE_DISK && -		is_logical_dev_addr_mode(scsi3addr)) +		is_logical_dev_addr_mode(scsi3addr)) { +		int volume_offline; +  		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); -	else +		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) +			hpsa_get_ioaccel_status(h, scsi3addr, this_device); +		volume_offline = hpsa_volume_offline(h, scsi3addr); +		if (volume_offline < 0 || volume_offline > 0xff) +			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; +		this_device->volume_offline = volume_offline & 0xff; +	} else {  		this_device->raid_level = RAID_UNKNOWN; +		this_device->offload_config = 0; +		this_device->offload_enabled = 0; +		this_device->volume_offline = 0; +	}  	if (is_OBDR_device) {  		/* See if this is a One-Button-Disaster-Recovery device @@ -1783,6 +2686,7 @@ static unsigned char *ext_target_model[] = {  	"MSA2312",  	"MSA2324",  	"P2000 G3 SAS", +	"MSA 2040 SAS",  	NULL,  }; @@ -1886,6 +2790,101 @@ static int add_ext_target_dev(struct ctlr_info *h,  }  /* + * Get address of physical disk used for an ioaccel2 mode command: + *	1. Extract ioaccel2 handle from the command. + *	2. Find a matching ioaccel2 handle from list of physical disks. + *	3. Return: + *		1 and set scsi3addr to address of matching physical + *		0 if no matching physical disk was found. + */ +static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, +	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) +{ +	struct ReportExtendedLUNdata *physicals = NULL; +	int responsesize = 24;	/* size of physical extended response */ +	int extended = 2;	/* flag forces reporting 'other dev info'. 
*/ +	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; +	u32 nphysicals = 0;	/* number of reported physical devs */ +	int found = 0;		/* found match (1) or not (0) */ +	u32 find;		/* handle we need to match */ +	int i; +	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */ +	struct hpsa_scsi_dev_t *d; /* device of request being aborted */ +	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ +	u32 it_nexus;		/* 4 byte device handle for the ioaccel2 cmd */ +	u32 scsi_nexus;		/* 4 byte device handle for the ioaccel2 cmd */ + +	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) +		return 0; /* no match */ + +	/* point to the ioaccel2 device handle */ +	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; +	if (c2a == NULL) +		return 0; /* no match */ + +	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; +	if (scmd == NULL) +		return 0; /* no match */ + +	d = scmd->device->hostdata; +	if (d == NULL) +		return 0; /* no match */ + +	it_nexus = cpu_to_le32((u32) d->ioaccel_handle); +	scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); +	find = c2a->scsi_nexus; + +	if (h->raid_offload_debug > 0) +		dev_info(&h->pdev->dev, +			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", +			__func__, scsi_nexus, +			d->device_id[0], d->device_id[1], d->device_id[2], +			d->device_id[3], d->device_id[4], d->device_id[5], +			d->device_id[6], d->device_id[7], d->device_id[8], +			d->device_id[9], d->device_id[10], d->device_id[11], +			d->device_id[12], d->device_id[13], d->device_id[14], +			d->device_id[15]); + +	/* Get the list of physical devices */ +	physicals = kzalloc(reportsize, GFP_KERNEL); +	if (physicals == NULL) +		return 0; +	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, +		reportsize, extended)) { +		dev_err(&h->pdev->dev, +			"Can't lookup %s device handle: report physical LUNs failed.\n", +			"HP SSD Smart Path"); +		kfree(physicals); +		return 0; +	} +	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / +							responsesize; + +	/* find ioaccel2 handle in list of physicals: */ +	for (i = 0; i < nphysicals; i++) { +		struct ext_report_lun_entry *entry = &physicals->LUN[i]; + +		/* handle is in bytes 28-31 of each lun */ +		if (entry->ioaccel_handle != find) +			continue; /* didn't match */ +		found = 1; +		memcpy(scsi3addr, entry->lunid, 8); +		if (h->raid_offload_debug > 0) +			dev_info(&h->pdev->dev, +				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", +				__func__, find, +				entry->ioaccel_handle, scsi3addr); +		break; /* found it */ +	} + +	kfree(physicals); +	if (found) +		return 1; +	else +		return 0; + +} +/*   * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,   * logdev.  The number of luns in physdev and logdev are returned in   * *nphysicals and *nlogicals, respectively. 
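+ * (As a rough illustration of the arithmetic, with stand-in names;
+ * only the 8- and 24-byte entry sizes and the big-endian length field
+ * come from the driver, and userspace code would also need <string.h>
+ * and <arpa/inet.h>:
+ *
+ *	static uint32_t report_luns_count(const uint8_t *rsp, int extended)
+ *	{
+ *		uint32_t list_len;	// big-endian byte count, bytes 0-3
+ *
+ *		memcpy(&list_len, rsp, sizeof(list_len));
+ *		return ntohl(list_len) / (extended ? 24 : 8);
+ *	}
+ *
+ * hpsa_gather_lun_info() below performs the same division with
+ * be32_to_cpu() and physical_entry_size.)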
@@ -1893,14 +2892,26 @@ static int add_ext_target_dev(struct ctlr_info *h,   */  static int hpsa_gather_lun_info(struct ctlr_info *h,  	int reportlunsize, -	struct ReportLUNdata *physdev, u32 *nphysicals, +	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,  	struct ReportLUNdata *logdev, u32 *nlogicals)  { -	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { +	int physical_entry_size = 8; + +	*physical_mode = 0; + +	/* For I/O accelerator mode we need to read physical device handles */ +	if (h->transMethod & CFGTBL_Trans_io_accel1 || +		h->transMethod & CFGTBL_Trans_io_accel2) { +		*physical_mode = HPSA_REPORT_PHYS_EXTENDED; +		physical_entry_size = 24; +	} +	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, +							*physical_mode)) {  		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");  		return -1;  	} -	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; +	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / +							physical_entry_size;  	if (*nphysicals > HPSA_MAX_PHYS_LUN) {  		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."  			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, @@ -1931,7 +2942,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,  }  u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, -	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, +	int nphysicals, int nlogicals, +	struct ReportExtendedLUNdata *physdev_list,  	struct ReportLUNdata *logdev_list)  {  	/* Helper function, figure out where the LUN ID info is coming from @@ -1946,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,  		return RAID_CTLR_LUNID;  	if (i < logicals_start) -		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; +		return &physdev_list->LUN[i - +				(raid_ctlr_position == 0)].lunid[0];  	if (i < last_device)  		return &logdev_list->LUN[i - nphysicals - @@ -1955,6 +2968,29 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,  	return NULL;  } +static int hpsa_hba_mode_enabled(struct ctlr_info *h) +{ +	int rc; +	int hba_mode_enabled; +	struct bmic_controller_parameters *ctlr_params; +	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), +		GFP_KERNEL); + +	if (!ctlr_params) +		return -ENOMEM; +	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, +		sizeof(struct bmic_controller_parameters)); +	if (rc) { +		kfree(ctlr_params); +		return rc; +	} + +	hba_mode_enabled = +		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); +	kfree(ctlr_params); +	return hba_mode_enabled; +} +  static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  {  	/* the idea here is we could get notified @@ -1967,16 +3003,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	 * tell which devices we already know about, vs. new  	 * devices, vs.  disappearing devices.  	 
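+	 * The HBA-mode flag is also re-read on every pass, so a controller
+	 * switched between RAID and HBA personalities gets its disk list
+	 * rebuilt under the new exposure rules.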
*/ -	struct ReportLUNdata *physdev_list = NULL; +	struct ReportExtendedLUNdata *physdev_list = NULL;  	struct ReportLUNdata *logdev_list = NULL;  	u32 nphysicals = 0;  	u32 nlogicals = 0; +	int physical_mode = 0;  	u32 ndev_allocated = 0;  	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;  	int ncurrent = 0; -	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; +	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;  	int i, n_ext_target_devs, ndevs_to_allocate;  	int raid_ctlr_position; +	int rescan_hba_mode;  	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);  	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); @@ -1990,8 +3028,20 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	}  	memset(lunzerobits, 0, sizeof(lunzerobits)); -	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, -			logdev_list, &nlogicals)) +	rescan_hba_mode = hpsa_hba_mode_enabled(h); +	if (rescan_hba_mode < 0) +		goto out; + +	if (!h->hba_mode_enabled && rescan_hba_mode) +		dev_warn(&h->pdev->dev, "HBA mode enabled\n"); +	else if (h->hba_mode_enabled && !rescan_hba_mode) +		dev_warn(&h->pdev->dev, "HBA mode disabled\n"); + +	h->hba_mode_enabled = rescan_hba_mode; + +	if (hpsa_gather_lun_info(h, reportlunsize, +			(struct ReportLUNdata *) physdev_list, &nphysicals, +			&physical_mode, logdev_list, &nlogicals))  		goto out;  	/* We might see up to the maximum number of logical and physical disks @@ -2018,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  		ndev_allocated++;  	} -	if (unlikely(is_scsi_rev_5(h))) +	if (is_scsi_rev_5(h))  		raid_ctlr_position = 0;  	else  		raid_ctlr_position = nphysicals + nlogicals; @@ -2072,9 +3122,28 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  				ncurrent++;  			break;  		case TYPE_DISK: -			if (i < nphysicals) +			if (h->hba_mode_enabled) { +				/* never use raid mapper in HBA mode */ +				this_device->offload_enabled = 0; +				ncurrent++;  				break; -			ncurrent++; +			} else if (h->acciopath_status) { +				if (i >= nphysicals) { +					ncurrent++; +					break; +				} +			} else { +				if (i < nphysicals) +					break; +				ncurrent++; +				break; +			} +			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { +				memcpy(&this_device->ioaccel_handle, +					&lunaddrbytes[20], +					sizeof(this_device->ioaccel_handle)); +				ncurrent++; +			}  			break;  		case TYPE_TAPE:  		case TYPE_MEDIUM_CHANGER: @@ -2144,7 +3213,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,  		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);  		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);  		curr_sg->Len = len; -		curr_sg->Ext = 0;  /* we are not chaining */ +		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 
0 : HPSA_SG_LAST;  		curr_sg++;  	} @@ -2168,6 +3237,726 @@ sglist_finished:  	return 0;  } +#define IO_ACCEL_INELIGIBLE (1) +static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) +{ +	int is_write = 0; +	u32 block; +	u32 block_cnt; + +	/* Perform some CDB fixups if needed using 10 byte reads/writes only */ +	switch (cdb[0]) { +	case WRITE_6: +	case WRITE_12: +		is_write = 1; +	case READ_6: +	case READ_12: +		if (*cdb_len == 6) { +			block = (((u32) cdb[2]) << 8) | cdb[3]; +			block_cnt = cdb[4]; +		} else { +			BUG_ON(*cdb_len != 12); +			block = (((u32) cdb[2]) << 24) | +				(((u32) cdb[3]) << 16) | +				(((u32) cdb[4]) << 8) | +				cdb[5]; +			block_cnt = +				(((u32) cdb[6]) << 24) | +				(((u32) cdb[7]) << 16) | +				(((u32) cdb[8]) << 8) | +				cdb[9]; +		} +		if (block_cnt > 0xffff) +			return IO_ACCEL_INELIGIBLE; + +		cdb[0] = is_write ? WRITE_10 : READ_10; +		cdb[1] = 0; +		cdb[2] = (u8) (block >> 24); +		cdb[3] = (u8) (block >> 16); +		cdb[4] = (u8) (block >> 8); +		cdb[5] = (u8) (block); +		cdb[6] = 0; +		cdb[7] = (u8) (block_cnt >> 8); +		cdb[8] = (u8) (block_cnt); +		cdb[9] = 0; +		*cdb_len = 10; +		break; +	} +	return 0; +} + +static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 *scsi3addr) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; +	unsigned int len; +	unsigned int total_len = 0; +	struct scatterlist *sg; +	u64 addr64; +	int use_sg, i; +	struct SGDescriptor *curr_sg; +	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; + +	/* TODO: implement chaining support */ +	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) +		return IO_ACCEL_INELIGIBLE; + +	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); + +	if (fixup_ioaccel_cdb(cdb, &cdb_len)) +		return IO_ACCEL_INELIGIBLE; + +	c->cmd_type = CMD_IOACCEL1; + +	/* Adjust the DMA address to point to the accelerated command buffer */ +	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + +				(c->cmdindex * sizeof(*cp)); +	BUG_ON(c->busaddr & 0x0000007F); + +	use_sg = scsi_dma_map(cmd); +	if (use_sg < 0) +		return use_sg; + +	if (use_sg) { +		curr_sg = cp->SG; +		scsi_for_each_sg(cmd, sg, use_sg, i) { +			addr64 = (u64) sg_dma_address(sg); +			len  = sg_dma_len(sg); +			total_len += len; +			curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); +			curr_sg->Addr.upper = +				(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); +			curr_sg->Len = len; + +			if (i == (scsi_sg_count(cmd) - 1)) +				curr_sg->Ext = HPSA_SG_LAST; +			else +				curr_sg->Ext = 0;  /* we are not chaining */ +			curr_sg++; +		} + +		switch (cmd->sc_data_direction) { +		case DMA_TO_DEVICE: +			control |= IOACCEL1_CONTROL_DATA_OUT; +			break; +		case DMA_FROM_DEVICE: +			control |= IOACCEL1_CONTROL_DATA_IN; +			break; +		case DMA_NONE: +			control |= IOACCEL1_CONTROL_NODATAXFER; +			break; +		default: +			dev_err(&h->pdev->dev, "unknown data direction: %d\n", +			cmd->sc_data_direction); +			BUG(); +			break; +		} +	} else { +		control |= IOACCEL1_CONTROL_NODATAXFER; +	} + +	c->Header.SGList = use_sg; +	/* Fill out the command structure to submit */ +	cp->dev_handle = ioaccel_handle & 0xFFFF; +	cp->transfer_len = total_len; +	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ | +			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); +	cp->control = control; +	memcpy(cp->CDB, cdb, cdb_len); +	memcpy(cp->CISS_LUN, scsi3addr, 8); +	/* Tag was already set at init time. 
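+	 * (Each entry in the ioaccel1 command pool has its tag written once
+	 * when the pool is set up, so the hot submission path never has to
+	 * touch it.)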
*/ +	enqueue_cmd_and_start_io(h, c); +	return 0; +} + +/* + * Queue a command directly to a device behind the controller using the + * I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, +	struct CommandList *c) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + +	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, +		cmd->cmnd, cmd->cmd_len, dev->scsi3addr); +} + +/* + * Set encryption parameters for the ioaccel2 request + */ +static void set_encrypt_ioaccel2(struct ctlr_info *h, +	struct CommandList *c, struct io_accel2_cmd *cp) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; +	struct raid_map_data *map = &dev->raid_map; +	u64 first_block; + +	BUG_ON(!(dev->offload_config && dev->offload_enabled)); + +	/* Are we doing encryption on this device */ +	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON)) +		return; +	/* Set the data encryption key index. */ +	cp->dekindex = map->dekindex; + +	/* Set the encryption enable flag, encoded into direction field. */ +	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; + +	/* Set encryption tweak values based on logical block address +	 * If block size is 512, tweak value is LBA. +	 * For other block sizes, tweak is (LBA * block size)/ 512) +	 */ +	switch (cmd->cmnd[0]) { +	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ +	case WRITE_6: +	case READ_6: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[2]) << 8) | +					cmd->cmnd[3]; +			cp->tweak_upper = 0; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 8) | +					cmd->cmnd[3]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	case WRITE_10: +	case READ_10: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[2]) << 24) | +				(((u32) cmd->cmnd[3]) << 16) | +				(((u32) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			cp->tweak_upper = 0; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 24) | +				(((u64) cmd->cmnd[3]) << 16) | +				(((u64) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	/* Required? 
12-byte cdbs eliminated by fixup_ioaccel_cdb */ +	case WRITE_12: +	case READ_12: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[2]) << 24) | +				(((u32) cmd->cmnd[3]) << 16) | +				(((u32) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			cp->tweak_upper = 0; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 24) | +				(((u64) cmd->cmnd[3]) << 16) | +				(((u64) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	case WRITE_16: +	case READ_16: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[6]) << 24) | +				(((u32) cmd->cmnd[7]) << 16) | +				(((u32) cmd->cmnd[8]) << 8) | +					cmd->cmnd[9]; +			cp->tweak_upper = +				(((u32) cmd->cmnd[2]) << 24) | +				(((u32) cmd->cmnd[3]) << 16) | +				(((u32) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 56) | +				(((u64) cmd->cmnd[3]) << 48) | +				(((u64) cmd->cmnd[4]) << 40) | +				(((u64) cmd->cmnd[5]) << 32) | +				(((u64) cmd->cmnd[6]) << 24) | +				(((u64) cmd->cmnd[7]) << 16) | +				(((u64) cmd->cmnd[8]) << 8) | +					cmd->cmnd[9]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	default: +		dev_err(&h->pdev->dev, +			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n", +			__func__); +		BUG(); +		break; +	} +} + +static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 *scsi3addr) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; +	struct ioaccel2_sg_element *curr_sg; +	int use_sg, i; +	struct scatterlist *sg; +	u64 addr64; +	u32 len; +	u32 total_len = 0; + +	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) +		return IO_ACCEL_INELIGIBLE; + +	if (fixup_ioaccel_cdb(cdb, &cdb_len)) +		return IO_ACCEL_INELIGIBLE; +	c->cmd_type = CMD_IOACCEL2; +	/* Adjust the DMA address to point to the accelerated command buffer */ +	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + +				(c->cmdindex * sizeof(*cp)); +	BUG_ON(c->busaddr & 0x0000007F); + +	memset(cp, 0, sizeof(*cp)); +	cp->IU_type = IOACCEL2_IU_TYPE; + +	use_sg = scsi_dma_map(cmd); +	if (use_sg < 0) +		return use_sg; + +	if (use_sg) { +		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); +		curr_sg = cp->sg; +		scsi_for_each_sg(cmd, sg, use_sg, i) { +			addr64 = (u64) sg_dma_address(sg); +			len  = sg_dma_len(sg); +			total_len += len; +			curr_sg->address = cpu_to_le64(addr64); +			curr_sg->length = cpu_to_le32(len); +			curr_sg->reserved[0] = 0; +			curr_sg->reserved[1] = 0; +			curr_sg->reserved[2] = 0; +			curr_sg->chain_indicator = 0; +			curr_sg++; +		} + +		switch (cmd->sc_data_direction) { +		case DMA_TO_DEVICE: +			cp->direction &= ~IOACCEL2_DIRECTION_MASK; +			cp->direction |= IOACCEL2_DIR_DATA_OUT; +			break; +		case DMA_FROM_DEVICE: +			cp->direction &= ~IOACCEL2_DIRECTION_MASK; +			cp->direction |= IOACCEL2_DIR_DATA_IN; +			break; +		case DMA_NONE: +			cp->direction &= ~IOACCEL2_DIRECTION_MASK; +			cp->direction |= IOACCEL2_DIR_NO_DATA; +			break; +		default: +			dev_err(&h->pdev->dev, "unknown data direction: %d\n", +				cmd->sc_data_direction); +			BUG(); +			break; +		} +	} else { +		cp->direction &= ~IOACCEL2_DIRECTION_MASK; +		cp->direction |= 
IOACCEL2_DIR_NO_DATA; +	} + +	/* Set encryption parameters, if necessary */ +	set_encrypt_ioaccel2(h, c, cp); + +	cp->scsi_nexus = ioaccel_handle; +	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | +				DIRECT_LOOKUP_BIT; +	memcpy(cp->cdb, cdb, sizeof(cp->cdb)); + +	/* fill in sg elements */ +	cp->sg_count = (u8) use_sg; + +	cp->data_len = cpu_to_le32(total_len); +	cp->err_ptr = cpu_to_le64(c->busaddr + +			offsetof(struct io_accel2_cmd, error_data)); +	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); + +	enqueue_cmd_and_start_io(h, c); +	return 0; +} + +/* + * Queue a command to the correct I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 *scsi3addr) +{ +	if (h->transMethod & CFGTBL_Trans_io_accel1) +		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, +						cdb, cdb_len, scsi3addr); +	else +		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, +						cdb, cdb_len, scsi3addr); +} + +static void raid_map_helper(struct raid_map_data *map, +		int offload_to_mirror, u32 *map_index, u32 *current_group) +{ +	if (offload_to_mirror == 0)  { +		/* use physical disk in the first mirrored group. */ +		*map_index %= map->data_disks_per_row; +		return; +	} +	do { +		/* determine mirror group that *map_index indicates */ +		*current_group = *map_index / map->data_disks_per_row; +		if (offload_to_mirror == *current_group) +			continue; +		if (*current_group < (map->layout_map_count - 1)) { +			/* select map index from next group */ +			*map_index += map->data_disks_per_row; +			(*current_group)++; +		} else { +			/* select map index from first group */ +			*map_index %= map->data_disks_per_row; +			*current_group = 0; +		} +	} while (offload_to_mirror != *current_group); +} + +/* + * Attempt to perform offload RAID mapping for a logical volume I/O. 
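
raid_map_helper()'s group rotation can be exercised stand-alone. Below is a user-space re-statement with a toy geometry (2 data disks per row, 3-way mirror; struct and function names are hypothetical), printing the map index chosen for each value of offload_to_mirror:

#include <stdint.h>
#include <stdio.h>

struct toy_map { uint32_t data_disks_per_row, layout_map_count; };

/* Same control flow as raid_map_helper() above. */
static void pick_mirror(const struct toy_map *m, uint32_t offload_to_mirror,
			uint32_t *map_index, uint32_t *current_group)
{
	if (offload_to_mirror == 0) {
		*map_index %= m->data_disks_per_row;
		return;
	}
	do {
		*current_group = *map_index / m->data_disks_per_row;
		if (offload_to_mirror == *current_group)
			continue;
		if (*current_group < m->layout_map_count - 1) {
			*map_index += m->data_disks_per_row;
			(*current_group)++;
		} else {
			*map_index %= m->data_disks_per_row;
			*current_group = 0;
		}
	} while (offload_to_mirror != *current_group);
}

int main(void)
{
	struct toy_map m = { 2, 3 };

	for (uint32_t mirror = 0; mirror < 3; mirror++) {
		uint32_t idx = 1, g = 0;	/* second disk in the row */

		pick_mirror(&m, mirror, &idx, &g);
		printf("mirror %u -> map index %u\n", mirror, idx);
		/* prints 1, 3, 5: the same disk in each mirror group */
	}
	return 0;
}
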
+ */ +static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, +	struct CommandList *c) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; +	struct raid_map_data *map = &dev->raid_map; +	struct raid_map_disk_data *dd = &map->data[0]; +	int is_write = 0; +	u32 map_index; +	u64 first_block, last_block; +	u32 block_cnt; +	u32 blocks_per_row; +	u64 first_row, last_row; +	u32 first_row_offset, last_row_offset; +	u32 first_column, last_column; +	u64 r0_first_row, r0_last_row; +	u32 r5or6_blocks_per_row; +	u64 r5or6_first_row, r5or6_last_row; +	u32 r5or6_first_row_offset, r5or6_last_row_offset; +	u32 r5or6_first_column, r5or6_last_column; +	u32 total_disks_per_row; +	u32 stripesize; +	u32 first_group, last_group, current_group; +	u32 map_row; +	u32 disk_handle; +	u64 disk_block; +	u32 disk_block_cnt; +	u8 cdb[16]; +	u8 cdb_len; +#if BITS_PER_LONG == 32 +	u64 tmpdiv; +#endif +	int offload_to_mirror; + +	BUG_ON(!(dev->offload_config && dev->offload_enabled)); + +	/* check for valid opcode, get LBA and block count */ +	switch (cmd->cmnd[0]) { +	case WRITE_6: +		is_write = 1; +	case READ_6: +		first_block = +			(((u64) cmd->cmnd[2]) << 8) | +			cmd->cmnd[3]; +		block_cnt = cmd->cmnd[4]; +		break; +	case WRITE_10: +		is_write = 1; +	case READ_10: +		first_block = +			(((u64) cmd->cmnd[2]) << 24) | +			(((u64) cmd->cmnd[3]) << 16) | +			(((u64) cmd->cmnd[4]) << 8) | +			cmd->cmnd[5]; +		block_cnt = +			(((u32) cmd->cmnd[7]) << 8) | +			cmd->cmnd[8]; +		break; +	case WRITE_12: +		is_write = 1; +	case READ_12: +		first_block = +			(((u64) cmd->cmnd[2]) << 24) | +			(((u64) cmd->cmnd[3]) << 16) | +			(((u64) cmd->cmnd[4]) << 8) | +			cmd->cmnd[5]; +		block_cnt = +			(((u32) cmd->cmnd[6]) << 24) | +			(((u32) cmd->cmnd[7]) << 16) | +			(((u32) cmd->cmnd[8]) << 8) | +		cmd->cmnd[9]; +		break; +	case WRITE_16: +		is_write = 1; +	case READ_16: +		first_block = +			(((u64) cmd->cmnd[2]) << 56) | +			(((u64) cmd->cmnd[3]) << 48) | +			(((u64) cmd->cmnd[4]) << 40) | +			(((u64) cmd->cmnd[5]) << 32) | +			(((u64) cmd->cmnd[6]) << 24) | +			(((u64) cmd->cmnd[7]) << 16) | +			(((u64) cmd->cmnd[8]) << 8) | +			cmd->cmnd[9]; +		block_cnt = +			(((u32) cmd->cmnd[10]) << 24) | +			(((u32) cmd->cmnd[11]) << 16) | +			(((u32) cmd->cmnd[12]) << 8) | +			cmd->cmnd[13]; +		break; +	default: +		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ +	} +	BUG_ON(block_cnt == 0); +	last_block = first_block + block_cnt - 1; + +	/* check for write to non-RAID-0 */ +	if (is_write && dev->raid_level != 0) +		return IO_ACCEL_INELIGIBLE; + +	/* check for invalid block or wraparound */ +	if (last_block >= map->volume_blk_cnt || last_block < first_block) +		return IO_ACCEL_INELIGIBLE; + +	/* calculate stripe information for the request */ +	blocks_per_row = map->data_disks_per_row * map->strip_size; +#if BITS_PER_LONG == 32 +	tmpdiv = first_block; +	(void) do_div(tmpdiv, blocks_per_row); +	first_row = tmpdiv; +	tmpdiv = last_block; +	(void) do_div(tmpdiv, blocks_per_row); +	last_row = tmpdiv; +	first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); +	last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); +	tmpdiv = first_row_offset; +	(void) do_div(tmpdiv,  map->strip_size); +	first_column = tmpdiv; +	tmpdiv = last_row_offset; +	(void) do_div(tmpdiv, map->strip_size); +	last_column = tmpdiv; +#else +	first_row = first_block / blocks_per_row; +	last_row = last_block / blocks_per_row; +	first_row_offset = (u32) (first_block - (first_row * 
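
The row/column arithmetic in the 64-bit branch above is easy to sanity-check with a toy layout. A sketch assuming 3 data disks per row and 128-block strips:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t first_block = 1000;
	uint32_t strip_size = 128, data_disks = 3;
	uint32_t blocks_per_row = data_disks * strip_size;	/* 384 */

	uint64_t first_row = first_block / blocks_per_row;	/* row 2 */
	uint32_t row_offset =
		(uint32_t)(first_block - first_row * blocks_per_row); /* 232 */
	uint32_t column = row_offset / strip_size;	/* strip 1 -> disk 1 */

	assert(first_row == 2 && row_offset == 232 && column == 1);
	return 0;
}
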
blocks_per_row));
+	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
+	first_column = first_row_offset / map->strip_size;
+	last_column = last_row_offset / map->strip_size;
+#endif
+
+	/* if this isn't a single row/column then give to the controller */
+	if ((first_row != last_row) || (first_column != last_column))
+		return IO_ACCEL_INELIGIBLE;
+
+	/* proceeding with driver mapping */
+	total_disks_per_row = map->data_disks_per_row +
+				map->metadata_disks_per_row;
+	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
+				map->row_cnt;
+	map_index = (map_row * total_disks_per_row) + first_column;
+
+	switch (dev->raid_level) {
+	case HPSA_RAID_0:
+		break; /* nothing special to do */
+	case HPSA_RAID_1:
+		/* Handles load balance across RAID 1 members.
+		 * (2-drive R1 and R10 with even # of drives.)
+		 * Appropriate for SSDs, not optimal for HDDs.
+		 */
+		BUG_ON(map->layout_map_count != 2);
+		if (dev->offload_to_mirror)
+			map_index += map->data_disks_per_row;
+		dev->offload_to_mirror = !dev->offload_to_mirror;
+		break;
+	case HPSA_RAID_ADM:
+		/* Handles N-way mirrors (R1-ADM)
+		 * and R10 with # of drives divisible by 3.
+		 */
+		BUG_ON(map->layout_map_count != 3);
+
+		offload_to_mirror = dev->offload_to_mirror;
+		raid_map_helper(map, offload_to_mirror,
+				&map_index, &current_group);
+		/* set mirror group to use next time */
+		offload_to_mirror =
+			(offload_to_mirror >= map->layout_map_count - 1)
+			? 0 : offload_to_mirror + 1;
+		/* FIXME: remove after debug/dev */
+		BUG_ON(offload_to_mirror >= map->layout_map_count);
+		dev_warn(&h->pdev->dev,
+			"DEBUG: Using physical disk map index %d from mirror group %d\n",
+			map_index, offload_to_mirror);
+		dev->offload_to_mirror = offload_to_mirror;
+		/* Avoid direct use of dev->offload_to_mirror within this
+		 * function since multiple threads might simultaneously
+		 * increment it beyond the range of dev->layout_map_count - 1.
+		 */ +		break; +	case HPSA_RAID_5: +	case HPSA_RAID_6: +		if (map->layout_map_count <= 1) +			break; + +		/* Verify first and last block are in same RAID group */ +		r5or6_blocks_per_row = +			map->strip_size * map->data_disks_per_row; +		BUG_ON(r5or6_blocks_per_row == 0); +		stripesize = r5or6_blocks_per_row * map->layout_map_count; +#if BITS_PER_LONG == 32 +		tmpdiv = first_block; +		first_group = do_div(tmpdiv, stripesize); +		tmpdiv = first_group; +		(void) do_div(tmpdiv, r5or6_blocks_per_row); +		first_group = tmpdiv; +		tmpdiv = last_block; +		last_group = do_div(tmpdiv, stripesize); +		tmpdiv = last_group; +		(void) do_div(tmpdiv, r5or6_blocks_per_row); +		last_group = tmpdiv; +#else +		first_group = (first_block % stripesize) / r5or6_blocks_per_row; +		last_group = (last_block % stripesize) / r5or6_blocks_per_row; +#endif +		if (first_group != last_group) +			return IO_ACCEL_INELIGIBLE; + +		/* Verify request is in a single row of RAID 5/6 */ +#if BITS_PER_LONG == 32 +		tmpdiv = first_block; +		(void) do_div(tmpdiv, stripesize); +		first_row = r5or6_first_row = r0_first_row = tmpdiv; +		tmpdiv = last_block; +		(void) do_div(tmpdiv, stripesize); +		r5or6_last_row = r0_last_row = tmpdiv; +#else +		first_row = r5or6_first_row = r0_first_row = +						first_block / stripesize; +		r5or6_last_row = r0_last_row = last_block / stripesize; +#endif +		if (r5or6_first_row != r5or6_last_row) +			return IO_ACCEL_INELIGIBLE; + + +		/* Verify request is in a single column */ +#if BITS_PER_LONG == 32 +		tmpdiv = first_block; +		first_row_offset = do_div(tmpdiv, stripesize); +		tmpdiv = first_row_offset; +		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); +		r5or6_first_row_offset = first_row_offset; +		tmpdiv = last_block; +		r5or6_last_row_offset = do_div(tmpdiv, stripesize); +		tmpdiv = r5or6_last_row_offset; +		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); +		tmpdiv = r5or6_first_row_offset; +		(void) do_div(tmpdiv, map->strip_size); +		first_column = r5or6_first_column = tmpdiv; +		tmpdiv = r5or6_last_row_offset; +		(void) do_div(tmpdiv, map->strip_size); +		r5or6_last_column = tmpdiv; +#else +		first_row_offset = r5or6_first_row_offset = +			(u32)((first_block % stripesize) % +						r5or6_blocks_per_row); + +		r5or6_last_row_offset = +			(u32)((last_block % stripesize) % +						r5or6_blocks_per_row); + +		first_column = r5or6_first_column = +			r5or6_first_row_offset / map->strip_size; +		r5or6_last_column = +			r5or6_last_row_offset / map->strip_size; +#endif +		if (r5or6_first_column != r5or6_last_column) +			return IO_ACCEL_INELIGIBLE; + +		/* Request is eligible */ +		map_row = ((u32)(first_row >> map->parity_rotation_shift)) % +			map->row_cnt; + +		map_index = (first_group * +			(map->row_cnt * total_disks_per_row)) + +			(map_row * total_disks_per_row) + first_column; +		break; +	default: +		return IO_ACCEL_INELIGIBLE; +	} + +	disk_handle = dd[map_index].ioaccel_handle; +	disk_block = map->disk_starting_blk + (first_row * map->strip_size) + +			(first_row_offset - (first_column * map->strip_size)); +	disk_block_cnt = block_cnt; + +	/* handle differing logical/physical block sizes */ +	if (map->phys_blk_shift) { +		disk_block <<= map->phys_blk_shift; +		disk_block_cnt <<= map->phys_blk_shift; +	} +	BUG_ON(disk_block_cnt > 0xffff); + +	/* build the new CDB for the physical disk I/O */ +	if (disk_block > 0xffffffff) { +		cdb[0] = is_write ? 
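
The RAID 5/6 eligibility tests above boil down to modular arithmetic on the stripe size. A sketch with an assumed geometry (4 data disks, 128-block strips, layout_map_count of 2):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t blocks_per_row = 4 * 128;		/* 512 */
	uint32_t stripesize = blocks_per_row * 2;	/* 1024 */
	uint64_t first = 1030, last = 1039;

	uint32_t first_group = (first % stripesize) / blocks_per_row;
	uint32_t last_group = (last % stripesize) / blocks_per_row;

	/* same group, so the request stays eligible for offload */
	assert(first_group == 0 && last_group == 0);
	return 0;
}
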
WRITE_16 : READ_16; +		cdb[1] = 0; +		cdb[2] = (u8) (disk_block >> 56); +		cdb[3] = (u8) (disk_block >> 48); +		cdb[4] = (u8) (disk_block >> 40); +		cdb[5] = (u8) (disk_block >> 32); +		cdb[6] = (u8) (disk_block >> 24); +		cdb[7] = (u8) (disk_block >> 16); +		cdb[8] = (u8) (disk_block >> 8); +		cdb[9] = (u8) (disk_block); +		cdb[10] = (u8) (disk_block_cnt >> 24); +		cdb[11] = (u8) (disk_block_cnt >> 16); +		cdb[12] = (u8) (disk_block_cnt >> 8); +		cdb[13] = (u8) (disk_block_cnt); +		cdb[14] = 0; +		cdb[15] = 0; +		cdb_len = 16; +	} else { +		cdb[0] = is_write ? WRITE_10 : READ_10; +		cdb[1] = 0; +		cdb[2] = (u8) (disk_block >> 24); +		cdb[3] = (u8) (disk_block >> 16); +		cdb[4] = (u8) (disk_block >> 8); +		cdb[5] = (u8) (disk_block); +		cdb[6] = 0; +		cdb[7] = (u8) (disk_block_cnt >> 8); +		cdb[8] = (u8) (disk_block_cnt); +		cdb[9] = 0; +		cdb_len = 10; +	} +	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, +						dev->scsi3addr); +}  static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	void (*done)(struct scsi_cmnd *)) @@ -2176,7 +3965,7 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	struct hpsa_scsi_dev_t *dev;  	unsigned char scsi3addr[8];  	struct CommandList *c; -	unsigned long flags; +	int rc = 0;  	/* Get the ptr to our adapter structure out of cmd->host. */  	h = sdev_to_hba(cmd->device); @@ -2188,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	}  	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); -	spin_lock_irqsave(&h->lock, flags); -	if (unlikely(h->lockup_detected)) { -		spin_unlock_irqrestore(&h->lock, flags); +	if (unlikely(lockup_detected(h))) {  		cmd->result = DID_ERROR << 16;  		done(cmd);  		return 0;  	} -	spin_unlock_irqrestore(&h->lock, flags);  	c = cmd_alloc(h);  	if (c == NULL) {			/* trouble... */  		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); @@ -2211,6 +3997,32 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	c->cmd_type = CMD_SCSI;  	c->scsi_cmd = cmd; + +	/* Call alternate submit routine for I/O accelerated commands. +	 * Retries always go down the normal I/O path. +	 */ +	if (likely(cmd->retries == 0 && +		cmd->request->cmd_type == REQ_TYPE_FS && +		h->acciopath_status)) { +		if (dev->offload_enabled) { +			rc = hpsa_scsi_ioaccel_raid_map(h, c); +			if (rc == 0) +				return 0; /* Sent on ioaccel path */ +			if (rc < 0) {   /* scsi_dma_map failed. */ +				cmd_free(h, c); +				return SCSI_MLQUEUE_HOST_BUSY; +			} +		} else if (dev->ioaccel_handle) { +			rc = hpsa_scsi_ioaccel_direct_map(h, c); +			if (rc == 0) +				return 0; /* Sent on direct map path */ +			if (rc < 0) {   /* scsi_dma_map failed. */ +				cmd_free(h, c); +				return SCSI_MLQUEUE_HOST_BUSY; +			} +		} +	} +  	c->Header.ReplyQueue = 0;  /* unused in simple mode */  	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);  	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); @@ -2270,11 +4082,35 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  static DEF_SCSI_QCMD(hpsa_scsi_queue_command) +static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) +{ +	unsigned long flags; + +	/* +	 * Don't let rescans be initiated on a controller known +	 * to be locked up.  If the controller locks up *during* +	 * a rescan, that thread is probably hosed, but at least +	 * we can prevent new rescan threads from piling up on a +	 * locked up controller. 
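
Returning to the CDB construction at the end of this hunk: a sketch of the phys_blk_shift scaling and the 10- vs 16-byte CDB decision, with an assumed shift of 3 (512-byte logical blocks on a device addressed in larger units):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t disk_block = 0x180000000ULL >> 3;
	uint32_t disk_block_cnt = 8;
	unsigned int phys_blk_shift = 3;

	/* handle differing logical/physical block sizes, as above */
	disk_block <<= phys_blk_shift;
	disk_block_cnt <<= phys_blk_shift;

	/* READ(10)/WRITE(10) carry a 32-bit LBA; beyond that, 16-byte CDB */
	int need_16byte_cdb = disk_block > 0xffffffffULL;

	assert(need_16byte_cdb && disk_block_cnt == 64);
	return 0;
}
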
+	 */ +	if (unlikely(lockup_detected(h))) { +		spin_lock_irqsave(&h->scan_lock, flags); +		h->scan_finished = 1; +		wake_up_all(&h->scan_wait_queue); +		spin_unlock_irqrestore(&h->scan_lock, flags); +		return 1; +	} +	return 0; +} +  static void hpsa_scan_start(struct Scsi_Host *sh)  {  	struct ctlr_info *h = shost_to_hba(sh);  	unsigned long flags; +	if (do_not_scan_if_controller_locked_up(h)) +		return; +  	/* wait until any scan already in progress is finished. */  	while (1) {  		spin_lock_irqsave(&h->scan_lock, flags); @@ -2291,6 +4127,9 @@ static void hpsa_scan_start(struct Scsi_Host *sh)  	h->scan_finished = 0; /* mark scan as in progress */  	spin_unlock_irqrestore(&h->scan_lock, flags); +	if (do_not_scan_if_controller_locked_up(h)) +		return; +  	hpsa_update_scsi_devices(h, h->scsi_host->host_no);  	spin_lock_irqsave(&h->scan_lock, flags); @@ -2354,7 +4193,10 @@ static int hpsa_register_scsi(struct ctlr_info *h)  	sh->max_lun = HPSA_MAX_LUN;  	sh->max_id = HPSA_MAX_LUN;  	sh->can_queue = h->nr_cmds; -	sh->cmd_per_lun = h->nr_cmds; +	if (h->hba_mode_enabled) +		sh->cmd_per_lun = 7; +	else +		sh->cmd_per_lun = h->nr_cmds;  	sh->sg_tablesize = h->maxsgentries;  	h->scsi_host = sh;  	sh->hostdata[0] = (unsigned long) h; @@ -2380,7 +4222,7 @@ static int hpsa_register_scsi(struct ctlr_info *h)  static int wait_for_device_to_become_ready(struct ctlr_info *h,  	unsigned char lunaddr[])  { -	int rc = 0; +	int rc;  	int count = 0;  	int waittime = 1; /* seconds */  	struct CommandList *c; @@ -2400,6 +4242,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,  		 */  		msleep(1000 * waittime);  		count++; +		rc = 0; /* Device ready. */  		/* Increase wait time with each try, up to a point. */  		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) @@ -2456,7 +4299,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)  	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",  		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);  	/* send a reset to the SCSI LUN which the command was sent to */ -	rc = hpsa_send_reset(h, dev->scsi3addr); +	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);  	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)  		return SUCCESS; @@ -2479,12 +4322,36 @@ static void swizzle_abort_tag(u8 *tag)  	tag[7] = original_tag[4];  } +static void hpsa_get_tag(struct ctlr_info *h, +	struct CommandList *c, u32 *taglower, u32 *tagupper) +{ +	if (c->cmd_type == CMD_IOACCEL1) { +		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) +			&h->ioaccel_cmd_pool[c->cmdindex]; +		*tagupper = cm1->Tag.upper; +		*taglower = cm1->Tag.lower; +		return; +	} +	if (c->cmd_type == CMD_IOACCEL2) { +		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) +			&h->ioaccel2_cmd_pool[c->cmdindex]; +		/* upper tag not used in ioaccel2 mode */ +		memset(tagupper, 0, sizeof(*tagupper)); +		*taglower = cm2->Tag; +		return; +	} +	*tagupper = c->Header.Tag.upper; +	*taglower = c->Header.Tag.lower; +} + +  static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,  	struct CommandList *abort, int swizzle)  {  	int rc = IO_OK;  	struct CommandList *c;  	struct ErrorInfo *ei; +	u32 tagupper, taglower;  	c = cmd_special_alloc(h);  	if (c == NULL) {	/* trouble... 
*/ @@ -2498,8 +4365,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,  	if (swizzle)  		swizzle_abort_tag(&c->Request.CDB[4]);  	hpsa_scsi_do_simple_cmd_core(h, c); +	hpsa_get_tag(h, abort, &taglower, &tagupper);  	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", -		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower); +		__func__, tagupper, taglower);  	/* no unmap needed here because no data xfer. */  	ei = c->err_info; @@ -2511,15 +4379,14 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,  		break;  	default:  		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", -			__func__, abort->Header.Tag.upper, -			abort->Header.Tag.lower); -		hpsa_scsi_interpret_error(c); +			__func__, tagupper, taglower); +		hpsa_scsi_interpret_error(h, c);  		rc = -1;  		break;  	}  	cmd_special_free(h, c); -	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, -		abort->Header.Tag.upper, abort->Header.Tag.lower); +	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", +		__func__, tagupper, taglower);  	return rc;  } @@ -2573,6 +4440,83 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,  	return NULL;  } +/* ioaccel2 path firmware cannot handle abort task requests. + * Change abort requests to physical target reset, and send to the + * address of the physical disk used for the ioaccel 2 command. + * Return 0 on success (IO_OK) + *	 -1 on failure + */ + +static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, +	unsigned char *scsi3addr, struct CommandList *abort) +{ +	int rc = IO_OK; +	struct scsi_cmnd *scmd; /* scsi command within request being aborted */ +	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ +	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ +	unsigned char *psa = &phys_scsi3addr[0]; + +	/* Get a pointer to the hpsa logical device. */ +	scmd = (struct scsi_cmnd *) abort->scsi_cmd; +	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); +	if (dev == NULL) { +		dev_warn(&h->pdev->dev, +			"Cannot abort: no device pointer for command.\n"); +			return -1; /* not abortable */ +	} + +	if (h->raid_offload_debug > 0) +		dev_info(&h->pdev->dev, +			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", +			h->scsi_host->host_no, dev->bus, dev->target, dev->lun, +			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], +			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); + +	if (!dev->offload_enabled) { +		dev_warn(&h->pdev->dev, +			"Can't abort: device is not operating in HP SSD Smart Path mode.\n"); +		return -1; /* not abortable */ +	} + +	/* Incoming scsi3addr is logical addr. We need physical disk addr. 
*/
+	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
+		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
+		return -1; /* not abortable */
+	}
+
+	/* send the reset */
+	if (h->raid_offload_debug > 0)
+		dev_info(&h->pdev->dev,
+			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
+	if (rc != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return rc; /* failed to reset */
+	}
+
+	/* wait for device to recover */
+	if (wait_for_device_to_become_ready(h, psa) != 0) {
+		dev_warn(&h->pdev->dev,
+			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			psa[0], psa[1], psa[2], psa[3],
+			psa[4], psa[5], psa[6], psa[7]);
+		return -1;  /* failed to recover */
+	}
+
+	/* device recovered */
+	dev_info(&h->pdev->dev,
+		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		psa[0], psa[1], psa[2], psa[3],
+		psa[4], psa[5], psa[6], psa[7]);
+
+	return rc; /* success */
+}
+
 /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
  * tell which kind we're dealing with, so we send the abort both ways.  There
  * shouldn't be any collisions between swizzled and unswizzled tags due to the
@@ -2586,6 +4530,14 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,
 	struct CommandList *c;
 	int rc = 0, rc2 = 0;
 
+	/* I/O accelerator mode 2 commands should be aborted via the
+	 * accelerated path, since RAID path is unaware of these commands,
+	 * but underlying firmware can't handle abort TMF.
+	 * Change abort to physical device reset.
+	 */
+	if (abort->cmd_type == CMD_IOACCEL2)
+		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
+
 	/* we do not expect to find the swizzled tag in our queue, but
 	 * check anyway just to be sure the assumptions which make this
 	 * the case haven't become wrong.
@@ -2624,6 +4576,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
 	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
 	char msg[256];		/* For debug messaging. 
*/  	int ml = 0; +	u32 tagupper, taglower;  	/* Find the controller of the command to be aborted */  	h = sdev_to_hba(sc->device); @@ -2656,9 +4609,8 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)  				msg);  		return FAILED;  	} - -	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", -		abort->Header.Tag.upper, abort->Header.Tag.lower); +	hpsa_get_tag(h, abort, &taglower, &tagupper); +	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);  	as  = (struct scsi_cmnd *) abort->scsi_cmd;  	if (as != NULL)  		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", @@ -2784,6 +4736,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)  		return NULL;  	memset(c, 0, sizeof(*c)); +	c->cmd_type = CMD_SCSI;  	c->cmdindex = -1;  	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), @@ -2997,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)  		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);  		if (buff == NULL)  			return -EFAULT; -		if (iocommand.Request.Type.Direction == XFER_WRITE) { +		if (iocommand.Request.Type.Direction & XFER_WRITE) {  			/* Copy the data into the buffer we created */  			if (copy_from_user(buff, iocommand.buf,  				iocommand.buf_size)) { @@ -3046,7 +4999,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)  		c->SG[0].Addr.lower = temp64.val32.lower;  		c->SG[0].Addr.upper = temp64.val32.upper;  		c->SG[0].Len = iocommand.buf_size; -		c->SG[0].Ext = 0; /* we are not chaining*/ +		c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/  	}  	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);  	if (iocommand.buf_size > 0) @@ -3060,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)  		rc = -EFAULT;  		goto out;  	} -	if (iocommand.Request.Type.Direction == XFER_READ && +	if ((iocommand.Request.Type.Direction & XFER_READ) &&  		iocommand.buf_size > 0) {  		/* Copy the data out of the buffer we created */  		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { @@ -3137,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)  			status = -ENOMEM;  			goto cleanup1;  		} -		if (ioc->Request.Type.Direction == XFER_WRITE) { +		if (ioc->Request.Type.Direction & XFER_WRITE) {  			if (copy_from_user(buff[sg_used], data_ptr, sz)) {  				status = -ENOMEM;  				goto cleanup1; @@ -3171,13 +5124,12 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)  				hpsa_pci_unmap(h->pdev, c, i,  					PCI_DMA_BIDIRECTIONAL);  				status = -ENOMEM; -				goto cleanup1; +				goto cleanup0;  			}  			c->SG[i].Addr.lower = temp64.val32.lower;  			c->SG[i].Addr.upper = temp64.val32.upper;  			c->SG[i].Len = buff_size[i]; -			/* we are not chaining */ -			c->SG[i].Ext = 0; +			c->SG[i].Ext = i < sg_used - 1 ? 
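
The switch from == to & in the passthru direction tests above matters when both transfer bits are set. A sketch, assuming the CISS-style flag values (XFER_WRITE 0x01, XFER_READ 0x02, both set for a bidirectional transfer):

#include <assert.h>

#define XFER_WRITE 0x01
#define XFER_READ  0x02

int main(void)
{
	int dir = XFER_READ | XFER_WRITE;	/* bidirectional request */

	assert(dir != XFER_WRITE);	/* old equality test: missed it */
	assert(dir & XFER_WRITE);	/* new mask test: copies data in */
	assert(dir & XFER_READ);	/* and copies data back out */
	return 0;
}
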
0 : HPSA_SG_LAST;  		}  	}  	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); @@ -3187,24 +5139,23 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)  	/* Copy the error information out */  	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));  	if (copy_to_user(argp, ioc, sizeof(*ioc))) { -		cmd_special_free(h, c);  		status = -EFAULT; -		goto cleanup1; +		goto cleanup0;  	} -	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { +	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {  		/* Copy the data out of the buffer we created */  		BYTE __user *ptr = ioc->buf;  		for (i = 0; i < sg_used; i++) {  			if (copy_to_user(ptr, buff[i], buff_size[i])) { -				cmd_special_free(h, c);  				status = -EFAULT; -				goto cleanup1; +				goto cleanup0;  			}  			ptr += buff_size[i];  		}  	} -	cmd_special_free(h, c);  	status = 0; +cleanup0: +	cmd_special_free(h, c);  cleanup1:  	if (buff) {  		for (i = 0; i < sg_used; i++) @@ -3223,6 +5174,36 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,  			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)  		(void) check_for_unit_attention(h, c);  } + +static int increment_passthru_count(struct ctlr_info *h) +{ +	unsigned long flags; + +	spin_lock_irqsave(&h->passthru_count_lock, flags); +	if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { +		spin_unlock_irqrestore(&h->passthru_count_lock, flags); +		return -1; +	} +	h->passthru_count++; +	spin_unlock_irqrestore(&h->passthru_count_lock, flags); +	return 0; +} + +static void decrement_passthru_count(struct ctlr_info *h) +{ +	unsigned long flags; + +	spin_lock_irqsave(&h->passthru_count_lock, flags); +	if (h->passthru_count <= 0) { +		spin_unlock_irqrestore(&h->passthru_count_lock, flags); +		/* not expecting to get here. 
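
The passthru throttle above is a plain lock-protected admission counter. A user-space sketch (a pthread mutex standing in for the spinlock; the cap value is assumed, HPSA_MAX_CONCURRENT_PASSTHRUS in the driver; compile with -pthread):

#include <assert.h>
#include <pthread.h>

#define MAX_CONCURRENT 3

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int count;

static int try_enter(void)
{
	int ok;

	pthread_mutex_lock(&lock);
	ok = count < MAX_CONCURRENT;
	if (ok)
		count++;
	pthread_mutex_unlock(&lock);
	return ok;
}

static void leave(void)
{
	pthread_mutex_lock(&lock);
	assert(count > 0);	/* mirrors the driver's sanity warning */
	count--;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	assert(try_enter() && try_enter() && try_enter());
	assert(!try_enter());	/* fourth caller: driver returns -EAGAIN */
	leave();
	assert(try_enter());
	return 0;
}
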
*/ +		dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); +		return; +	} +	h->passthru_count--; +	spin_unlock_irqrestore(&h->passthru_count_lock, flags); +} +  /*   * ioctl   */ @@ -3230,6 +5211,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)  {  	struct ctlr_info *h;  	void __user *argp = (void __user *)arg; +	int rc;  	h = sdev_to_hba(dev); @@ -3244,9 +5226,17 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)  	case CCISS_GETDRIVVER:  		return hpsa_getdrivver_ioctl(h, argp);  	case CCISS_PASSTHRU: -		return hpsa_passthru_ioctl(h, argp); +		if (increment_passthru_count(h)) +			return -EAGAIN; +		rc = hpsa_passthru_ioctl(h, argp); +		decrement_passthru_count(h); +		return rc;  	case CCISS_BIG_PASSTHRU: -		return hpsa_big_passthru_ioctl(h, argp); +		if (increment_passthru_count(h)) +			return -EAGAIN; +		rc = hpsa_big_passthru_ioctl(h, argp); +		decrement_passthru_count(h); +		return rc;  	default:  		return -ENOTTY;  	} @@ -3274,7 +5264,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,  }  static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, -	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, +	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,  	int cmd_type)  {  	int pci_dir = XFER_NONE; @@ -3297,9 +5287,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,  		switch (cmd) {  		case HPSA_INQUIRY:  			/* are we trying to read a vital product page */ -			if (page_code != 0) { +			if (page_code & VPD_PAGE) {  				c->Request.CDB[1] = 0x01; -				c->Request.CDB[2] = page_code; +				c->Request.CDB[2] = (page_code & 0xff);  			}  			c->Request.CDBLen = 6;  			c->Request.Type.Attribute = ATTR_SIMPLE; @@ -3339,6 +5329,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,  			c->Request.Type.Direction = XFER_NONE;  			c->Request.Timeout = 0;  			break; +		case HPSA_GET_RAID_MAP: +			c->Request.CDBLen = 12; +			c->Request.Type.Attribute = ATTR_SIMPLE; +			c->Request.Type.Direction = XFER_READ; +			c->Request.Timeout = 0; +			c->Request.CDB[0] = HPSA_CISS_READ; +			c->Request.CDB[1] = cmd; +			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ +			c->Request.CDB[7] = (size >> 16) & 0xFF; +			c->Request.CDB[8] = (size >> 8) & 0xFF; +			c->Request.CDB[9] = size & 0xFF; +			break; +		case BMIC_SENSE_CONTROLLER_PARAMETERS: +			c->Request.CDBLen = 10; +			c->Request.Type.Attribute = ATTR_SIMPLE; +			c->Request.Type.Direction = XFER_READ; +			c->Request.Timeout = 0; +			c->Request.CDB[0] = BMIC_READ; +			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; +			c->Request.CDB[7] = (size >> 16) & 0xFF; +			c->Request.CDB[8] = (size >> 8) & 0xFF; +			break;  		default:  			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);  			BUG(); @@ -3434,20 +5446,21 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)  /* Takes cmds off the submission queue and sends them to the hardware,   * then puts them on the queue of cmds waiting for completion. 
+ * Assumes h->lock is held   */ -static void start_io(struct ctlr_info *h) +static void start_io(struct ctlr_info *h, unsigned long *flags)  {  	struct CommandList *c; -	unsigned long flags; -	spin_lock_irqsave(&h->lock, flags);  	while (!list_empty(&h->reqQ)) {  		c = list_entry(h->reqQ.next, struct CommandList, list);  		/* can't do anything if fifo is full */  		if ((h->access.fifo_full(h))) { +			h->fifo_recently_full = 1;  			dev_warn(&h->pdev->dev, "fifo full\n");  			break;  		} +		h->fifo_recently_full = 0;  		/* Get the first entry from the Request Q */  		removeQ(c); @@ -3461,14 +5474,20 @@ static void start_io(struct ctlr_info *h)  		 * condition.  		 */  		h->commands_outstanding++; -		if (h->commands_outstanding > h->max_outstanding) -			h->max_outstanding = h->commands_outstanding;  		/* Tell the controller execute command */ -		spin_unlock_irqrestore(&h->lock, flags); +		spin_unlock_irqrestore(&h->lock, *flags);  		h->access.submit_command(h, c); -		spin_lock_irqsave(&h->lock, flags); +		spin_lock_irqsave(&h->lock, *flags);  	} +} + +static void lock_and_start_io(struct ctlr_info *h) +{ +	unsigned long flags; + +	spin_lock_irqsave(&h->lock, flags); +	start_io(h, &flags);  	spin_unlock_irqrestore(&h->lock, flags);  } @@ -3501,15 +5520,42 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,  static inline void finish_cmd(struct CommandList *c)  {  	unsigned long flags; +	int io_may_be_stalled = 0; +	struct ctlr_info *h = c->h; -	spin_lock_irqsave(&c->h->lock, flags); +	spin_lock_irqsave(&h->lock, flags);  	removeQ(c); -	spin_unlock_irqrestore(&c->h->lock, flags); + +	/* +	 * Check for possibly stalled i/o. +	 * +	 * If a fifo_full condition is encountered, requests will back up +	 * in h->reqQ.  This queue is only emptied out by start_io which is +	 * only called when a new i/o request comes in.  If no i/o's are +	 * forthcoming, the i/o's in h->reqQ can get stuck.  So we call +	 * start_io from here if we detect such a danger. +	 * +	 * Normally, we shouldn't hit this case, but pounding on the +	 * CCISS_PASSTHRU ioctl can provoke it.  Only call start_io if +	 * commands_outstanding is low.  We want to avoid calling +	 * start_io from in here as much as possible, and esp. don't +	 * want to get in a cycle where we call start_io every time +	 * through here. +	 */ +	if (unlikely(h->fifo_recently_full) && +		h->commands_outstanding < 5) +		io_may_be_stalled = 1; + +	spin_unlock_irqrestore(&h->lock, flags); +  	dial_up_lockup_detection_on_fw_flash_complete(c->h, c); -	if (likely(c->cmd_type == CMD_SCSI)) +	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI +			|| c->cmd_type == CMD_IOACCEL2))  		complete_scsi_command(c);  	else if (c->cmd_type == CMD_IOCTL_PEND)  		complete(c->waiting); +	if (unlikely(io_may_be_stalled)) +		lock_and_start_io(h);  }  static inline u32 hpsa_tag_contains_index(u32 tag) @@ -3785,6 +5831,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,  		 */  		dev_info(&pdev->dev, "using doorbell to reset controller\n");  		writel(use_doorbell, vaddr + SA5_DOORBELL); + +		/* PMC hardware guys tell us we need a 10 second delay after +		 * doorbell reset and before any attempt to talk to the board +		 * at all to ensure that this actually works and doesn't fall +		 * over in some weird corner cases. 
+		 */ +		msleep(10000);  	} else { /* Try to do it the PCI power state way */  		/* Quoting from the Open CISS Specification: "The Power @@ -3981,16 +6034,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)  	   need a little pause here */  	msleep(HPSA_POST_RESET_PAUSE_MSECS); -	/* Wait for board to become not ready, then ready. */ -	dev_info(&pdev->dev, "Waiting for board to reset.\n"); -	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); -	if (rc) { -		dev_warn(&pdev->dev, -			"failed waiting for board to reset." -			" Will try soft reset.\n"); -		rc = -ENOTSUPP; /* Not expected, but try soft reset later */ -		goto unmap_cfgtable; -	}  	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);  	if (rc) {  		dev_warn(&pdev->dev, @@ -4114,21 +6157,26 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)  		goto default_int_mode;  	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {  		dev_info(&h->pdev->dev, "MSIX\n"); +		h->msix_vector = MAX_REPLY_QUEUES; +		if (h->msix_vector > num_online_cpus()) +			h->msix_vector = num_online_cpus();  		err = pci_enable_msix(h->pdev, hpsa_msix_entries, -						MAX_REPLY_QUEUES); -		if (!err) { -			for (i = 0; i < MAX_REPLY_QUEUES; i++) -				h->intr[i] = hpsa_msix_entries[i].vector; -			h->msix_vector = 1; -			return; -		} +				      h->msix_vector);  		if (err > 0) {  			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "  			       "available\n", err); -			goto default_int_mode; +			h->msix_vector = err; +			err = pci_enable_msix(h->pdev, hpsa_msix_entries, +					      h->msix_vector); +		} +		if (!err) { +			for (i = 0; i < h->msix_vector; i++) +				h->intr[i] = hpsa_msix_entries[i].vector; +			return;  		} else {  			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",  			       err); +			h->msix_vector = 0;  			goto default_int_mode;  		}  	} @@ -4281,6 +6329,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)  	hpsa_get_max_perf_mode_cmds(h);  	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */  	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); +	h->fw_support = readl(&(h->cfgtable->misc_fw_support));  	/*  	 * Limit in-command s/g elements to 32 save dma'able memory.  	 
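
The MSI-X negotiation above follows the old pci_enable_msix() contract: 0 on success, a positive count of actually available vectors, negative on error. A condensed kernel-context sketch of the same retry (the wrapper name is hypothetical; current kernels replace this dance with pci_enable_msix_range()):

#include <linux/pci.h>

static int enable_msix_with_fallback(struct pci_dev *pdev,
				     struct msix_entry *entries, int want)
{
	int err = pci_enable_msix(pdev, entries, want);

	if (err > 0)			/* fewer vectors than requested */
		err = pci_enable_msix(pdev, entries, err);
	return err;			/* 0 on success, <0 on failure */
}
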
* Howvever spec says if 0, use 31 @@ -4297,6 +6346,10 @@ static void hpsa_find_board_params(struct ctlr_info *h)  	/* Find out what task management functions are supported and cache */  	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); +	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) +		dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); +	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) +		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");  }  static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) @@ -4308,16 +6361,17 @@ static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)  	return true;  } -/* Need to enable prefetch in the SCSI core for 6400 in x86 */ -static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) +static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)  { -#ifdef CONFIG_X86 -	u32 prefetch; +	u32 driver_support; -	prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); -	prefetch |= 0x100; -	writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); +#ifdef CONFIG_X86 +	/* Need to enable prefetch in the SCSI core for 6400 in x86 */ +	driver_support = readl(&(h->cfgtable->driver_support)); +	driver_support |= ENABLE_SCSI_PREFETCH;  #endif +	driver_support |= ENABLE_UNIT_ATTN; +	writel(driver_support, &(h->cfgtable->driver_support));  }  /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result @@ -4334,6 +6388,23 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)  	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);  } +static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) +{ +	int i; +	u32 doorbell_value; +	unsigned long flags; +	/* wait until the clear_event_notify bit 6 is cleared by controller. */ +	for (i = 0; i < MAX_CONFIG_WAIT; i++) { +		spin_lock_irqsave(&h->lock, flags); +		doorbell_value = readl(h->vaddr + SA5_DOORBELL); +		spin_unlock_irqrestore(&h->lock, flags); +		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) +			break; +		/* delay and try again */ +		msleep(20); +	} +} +  static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)  {  	int i; @@ -4364,18 +6435,20 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)  		return -ENOTSUPP;  	h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); +  	/* Update the field, and then ring the doorbell */  	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); +	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);  	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);  	hpsa_wait_for_mode_change_ack(h);  	print_cfg_table(&h->pdev->dev, h->cfgtable); -	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { -		dev_warn(&h->pdev->dev, -			"unable to get board into simple mode\n"); -		return -ENODEV; -	} +	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) +		goto error;  	h->transMethod = CFGTBL_Trans_Simple;  	return 0; +error: +	dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); +	return -ENODEV;  }  static int hpsa_pci_init(struct ctlr_info *h) @@ -4427,7 +6500,7 @@ static int hpsa_pci_init(struct ctlr_info *h)  		err = -ENODEV;  		goto err_out_free_res;  	} -	hpsa_enable_scsi_prefetch(h); +	hpsa_set_driver_support_bits(h);  	hpsa_p600_dma_prefetch_quirk(h);  	err = hpsa_enter_simple_mode(h);  	if (err) @@ -4521,11 +6594,30 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)  		pci_free_consistent(h->pdev,  			    h->nr_cmds * sizeof(struct CommandList),  			    h->cmd_pool, h->cmd_pool_dhandle); +	if (h->ioaccel2_cmd_pool) +		
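
hpsa_wait_for_clear_event_notify_ack() and hpsa_wait_for_mode_change_ack() share one pattern: re-read a doorbell register until the handshake bit drops, sleeping between tries. A generic kernel-context sketch with the same 20 ms backoff (bounded; the function name is hypothetical):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int wait_for_doorbell_clear(void __iomem *doorbell, u32 bit, int tries)
{
	while (tries--) {
		if (!(readl(doorbell) & bit))
			return 0;	/* controller acknowledged */
		msleep(20);		/* delay and try again */
	}
	return -ETIMEDOUT;
}
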
pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), +			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);  	if (h->errinfo_pool)  		pci_free_consistent(h->pdev,  			    h->nr_cmds * sizeof(struct ErrorInfo),  			    h->errinfo_pool,  			    h->errinfo_pool_dhandle); +	if (h->ioaccel_cmd_pool) +		pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(struct io_accel1_cmd), +			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); +} + +static void hpsa_irq_affinity_hints(struct ctlr_info *h) +{ +	int i, cpu, rc; + +	cpu = cpumask_first(cpu_online_mask); +	for (i = 0; i < h->msix_vector; i++) { +		rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); +		cpu = cpumask_next(cpu, cpu_online_mask); +	}  }  static int hpsa_request_irq(struct ctlr_info *h, @@ -4541,15 +6633,16 @@ static int hpsa_request_irq(struct ctlr_info *h,  	for (i = 0; i < MAX_REPLY_QUEUES; i++)  		h->q[i] = (u8) i; -	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) { +	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {  		/* If performant mode and MSI-X, use multiple reply queues */ -		for (i = 0; i < MAX_REPLY_QUEUES; i++) +		for (i = 0; i < h->msix_vector; i++)  			rc = request_irq(h->intr[i], msixhandler,  					0, h->devname,  					&h->q[i]); +		hpsa_irq_affinity_hints(h);  	} else {  		/* Use single reply pool */ -		if (h->msix_vector || h->msi_vector) { +		if (h->msix_vector > 0 || h->msi_vector) {  			rc = request_irq(h->intr[h->intr_mode],  				msixhandler, 0, h->devname,  				&h->q[h->intr_mode]); @@ -4598,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h)  	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {  		/* Single reply queue, only one irq to free */  		i = h->intr_mode; +		irq_set_affinity_hint(h->intr[i], NULL);  		free_irq(h->intr[i], &h->q[i]);  		return;  	} -	for (i = 0; i < MAX_REPLY_QUEUES; i++) +	for (i = 0; i < h->msix_vector; i++) { +		irq_set_affinity_hint(h->intr[i], NULL);  		free_irq(h->intr[i], &h->q[i]); +	}  }  static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) @@ -4620,14 +6716,28 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)  #endif /* CONFIG_PCI_MSI */  } +static void hpsa_free_reply_queues(struct ctlr_info *h) +{ +	int i; + +	for (i = 0; i < h->nreply_queues; i++) { +		if (!h->reply_queue[i].head) +			continue; +		pci_free_consistent(h->pdev, h->reply_queue_size, +			h->reply_queue[i].head, h->reply_queue[i].busaddr); +		h->reply_queue[i].head = NULL; +		h->reply_queue[i].busaddr = 0; +	} +} +  static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)  {  	hpsa_free_irqs_and_disable_msix(h);  	hpsa_free_sg_chain_blocks(h);  	hpsa_free_cmd_pool(h); +	kfree(h->ioaccel1_blockFetchTable);  	kfree(h->blockFetchTable); -	pci_free_consistent(h->pdev, h->reply_pool_size, -		h->reply_pool, h->reply_pool_dhandle); +	hpsa_free_reply_queues(h);  	if (h->vaddr)  		iounmap(h->vaddr);  	if (h->transtable) @@ -4638,16 +6748,6 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)  	kfree(h);  } -static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h) -{ -	assert_spin_locked(&lockup_detector_lock); -	if (!hpsa_lockup_detector) -		return; -	if (h->lockup_detected) -		return; /* already stopped the lockup detector */ -	list_del(&h->lockup_list); -} -  /* Called when controller lockup detected. 
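
The affinity-hint loop in hpsa_irq_affinity_hints() walks the online-CPU mask once per vector; the driver needs no wrap-around because it caps msix_vector at num_online_cpus(). A sketch of the same spreading that also wraps, for the general case where vectors may outnumber CPUs:

#include <linux/cpumask.h>
#include <linux/interrupt.h>

static void spread_irq_hints(unsigned int *irqs, int nvec)
{
	int i, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < nvec; i++) {
		irq_set_affinity_hint(irqs[i], get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)	/* wrap back to the first CPU */
			cpu = cpumask_first(cpu_online_mask);
	}
}
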
*/  static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)  { @@ -4662,18 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)  	}  } +static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) +{ +	int i, cpu; + +	cpu = cpumask_first(cpu_online_mask); +	for (i = 0; i < num_online_cpus(); i++) { +		u32 *lockup_detected; +		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); +		*lockup_detected = value; +		cpu = cpumask_next(cpu, cpu_online_mask); +	} +	wmb(); /* be sure the per-cpu variables are out to memory */ +} +  static void controller_lockup_detected(struct ctlr_info *h)  {  	unsigned long flags; +	u32 lockup_detected; -	assert_spin_locked(&lockup_detector_lock); -	remove_ctlr_from_lockup_detector_list(h);  	h->access.set_intr_mask(h, HPSA_INTR_OFF);  	spin_lock_irqsave(&h->lock, flags); -	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); +	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); +	if (!lockup_detected) { +		/* no heartbeat, but controller gave us a zero. */ +		dev_warn(&h->pdev->dev, +			"lockup detected but scratchpad register is zero\n"); +		lockup_detected = 0xffffffff; +	} +	set_lockup_detected_for_all_cpus(h, lockup_detected);  	spin_unlock_irqrestore(&h->lock, flags);  	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", -			h->lockup_detected); +			lockup_detected);  	pci_disable_device(h->pdev);  	spin_lock_irqsave(&h->lock, flags);  	fail_all_cmds_on_list(h, &h->cmpQ); @@ -4687,7 +6807,6 @@ static void detect_controller_lockup(struct ctlr_info *h)  	u32 heartbeat;  	unsigned long flags; -	assert_spin_locked(&lockup_detector_lock);  	now = get_jiffies_64();  	/* If we've received an interrupt recently, we're ok. */  	if (time_after64(h->last_intr_timestamp + @@ -4717,68 +6836,117 @@ static void detect_controller_lockup(struct ctlr_info *h)  	h->last_heartbeat_timestamp = now;  } -static int detect_controller_lockup_thread(void *notused) +static void hpsa_ack_ctlr_events(struct ctlr_info *h)  { -	struct ctlr_info *h; -	unsigned long flags; - -	while (1) { -		struct list_head *this, *tmp; - -		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL); -		if (kthread_should_stop()) -			break; -		spin_lock_irqsave(&lockup_detector_lock, flags); -		list_for_each_safe(this, tmp, &hpsa_ctlr_list) { -			h = list_entry(this, struct ctlr_info, lockup_list); -			detect_controller_lockup(h); -		} -		spin_unlock_irqrestore(&lockup_detector_lock, flags); +	int i; +	char *event_type; + +	/* Clear the driver-requested rescan flag */ +	h->drv_req_rescan = 0; + +	/* Ask the controller to clear the events we're handling. 
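
lockup_detected(h) itself is defined outside this hunk; below is a sketch of the presumed per-cpu pattern it pairs with set_lockup_detected_for_all_cpus(): one writer stamps every online CPU's copy, readers check their own copy without taking h->lock (preemption concerns ignored for brevity; the flag is assumed allocated with alloc_percpu(u32)):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static u32 __percpu *flag;

static void set_flag_all_cpus(u32 value)
{
	int cpu;

	for_each_online_cpu(cpu)
		*per_cpu_ptr(flag, cpu) = value;
	wmb();	/* push the stores out before readers poll */
}

static u32 read_flag(void)
{
	/* cheap: no lock, no cross-CPU cacheline bouncing */
	return *this_cpu_ptr(flag);
}
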
*/ +	if ((h->transMethod & (CFGTBL_Trans_io_accel1 +			| CFGTBL_Trans_io_accel2)) && +		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || +		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { + +		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) +			event_type = "state change"; +		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) +			event_type = "configuration change"; +		/* Stop sending new RAID offload reqs via the IO accelerator */ +		scsi_block_requests(h->scsi_host); +		for (i = 0; i < h->ndevices; i++) +			h->dev[i]->offload_enabled = 0; +		hpsa_drain_accel_commands(h); +		/* Set 'accelerator path config change' bit */ +		dev_warn(&h->pdev->dev, +			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", +			h->events, event_type); +		writel(h->events, &(h->cfgtable->clear_event_notify)); +		/* Set the "clear event notify field update" bit 6 */ +		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); +		/* Wait until ctlr clears 'clear event notify field', bit 6 */ +		hpsa_wait_for_clear_event_notify_ack(h); +		scsi_unblock_requests(h->scsi_host); +	} else { +		/* Acknowledge controller notification events. */ +		writel(h->events, &(h->cfgtable->clear_event_notify)); +		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); +		hpsa_wait_for_clear_event_notify_ack(h); +#if 0 +		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); +		hpsa_wait_for_mode_change_ack(h); +#endif  	} -	return 0; +	return;  } -static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h) +/* Check a register on the controller to see if there are configuration + * changes (added/changed/removed logical drives, etc.) which mean that + * we should rescan the controller for devices. + * Also check flag for driver-initiated rescan. 
+ */ +static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)  { -	unsigned long flags; +	if (h->drv_req_rescan) +		return 1; -	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; -	spin_lock_irqsave(&lockup_detector_lock, flags); -	list_add_tail(&h->lockup_list, &hpsa_ctlr_list); -	spin_unlock_irqrestore(&lockup_detector_lock, flags); +	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) +		return 0; + +	h->events = readl(&(h->cfgtable->event_notify)); +	return h->events & RESCAN_REQUIRED_EVENT_BITS;  } -static void start_controller_lockup_detector(struct ctlr_info *h) +/* + * Check if any of the offline devices have become ready + */ +static int hpsa_offline_devices_ready(struct ctlr_info *h)  { -	/* Start the lockup detector thread if not already started */ -	if (!hpsa_lockup_detector) { -		spin_lock_init(&lockup_detector_lock); -		hpsa_lockup_detector = -			kthread_run(detect_controller_lockup_thread, -						NULL, HPSA); -	} -	if (!hpsa_lockup_detector) { -		dev_warn(&h->pdev->dev, -			"Could not start lockup detector thread\n"); -		return; +	unsigned long flags; +	struct offline_device_entry *d; +	struct list_head *this, *tmp; + +	spin_lock_irqsave(&h->offline_device_lock, flags); +	list_for_each_safe(this, tmp, &h->offline_device_list) { +		d = list_entry(this, struct offline_device_entry, +				offline_list); +		spin_unlock_irqrestore(&h->offline_device_lock, flags); +		if (!hpsa_volume_offline(h, d->scsi3addr)) +			return 1; +		spin_lock_irqsave(&h->offline_device_lock, flags);  	} -	add_ctlr_to_lockup_detector_list(h); +	spin_unlock_irqrestore(&h->offline_device_lock, flags); +	return 0;  } -static void stop_controller_lockup_detector(struct ctlr_info *h) + +static void hpsa_monitor_ctlr_worker(struct work_struct *work)  {  	unsigned long flags; +	struct ctlr_info *h = container_of(to_delayed_work(work), +					struct ctlr_info, monitor_ctlr_work); +	detect_controller_lockup(h); +	if (lockup_detected(h)) +		return; + +	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { +		scsi_host_get(h->scsi_host); +		h->drv_req_rescan = 0; +		hpsa_ack_ctlr_events(h); +		hpsa_scan_start(h->scsi_host); +		scsi_host_put(h->scsi_host); +	} -	spin_lock_irqsave(&lockup_detector_lock, flags); -	remove_ctlr_from_lockup_detector_list(h); -	/* If the list of ctlr's to monitor is empty, stop the thread */ -	if (list_empty(&hpsa_ctlr_list)) { -		spin_unlock_irqrestore(&lockup_detector_lock, flags); -		kthread_stop(hpsa_lockup_detector); -		spin_lock_irqsave(&lockup_detector_lock, flags); -		hpsa_lockup_detector = NULL; +	spin_lock_irqsave(&h->lock, flags); +	if (h->remove_in_progress) { +		spin_unlock_irqrestore(&h->lock, flags); +		return;  	} -	spin_unlock_irqrestore(&lockup_detector_lock, flags); +	schedule_delayed_work(&h->monitor_ctlr_work, +				h->heartbeat_sample_interval); +	spin_unlock_irqrestore(&h->lock, flags);  }  static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -4810,7 +6978,6 @@ reinit_after_soft_reset:  	 * the 5 lower bits of the address are used by the hardware. and by  	 * the driver.  See comments in hpsa.h for more info.  	 */ -#define COMMANDLIST_ALIGNMENT 32  	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);  	h = kzalloc(sizeof(*h), GFP_KERNEL);  	if (!h) @@ -4820,8 +6987,18 @@ reinit_after_soft_reset:  	h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT;  	INIT_LIST_HEAD(&h->cmpQ);  	INIT_LIST_HEAD(&h->reqQ); +	INIT_LIST_HEAD(&h->offline_device_list);  	spin_lock_init(&h->lock); +	spin_lock_init(&h->offline_device_lock);  	spin_lock_init(&h->scan_lock); +	spin_lock_init(&h->passthru_count_lock); + +	/* Allocate and clear per-cpu variable lockup_detected */ +	h->lockup_detected = alloc_percpu(u32); +	if (!h->lockup_detected) +		goto clean1; +	set_lockup_detected_for_all_cpus(h, 0); +  	rc = hpsa_pci_init(h);  	if (rc != 0)  		goto clean1; @@ -4861,6 +7038,7 @@ reinit_after_soft_reset:  	pci_set_drvdata(pdev, h);  	h->ndevices = 0; +	h->hba_mode_enabled = 0;  	h->scsi_host = NULL;  	spin_lock_init(&h->devlock);  	hpsa_put_ctlr_into_performant_mode(h); @@ -4920,13 +7098,23 @@ reinit_after_soft_reset:  		goto reinit_after_soft_reset;  	} +		/* Enable Accelerated IO path at driver layer */ +		h->acciopath_status = 1; + +	h->drv_req_rescan = 0; +  	/* Turn the interrupts on so we can service requests */  	h->access.set_intr_mask(h, HPSA_INTR_ON);  	hpsa_hba_inquiry(h);  	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */ -	start_controller_lockup_detector(h); -	return 1; + +	/* Monitor the controller for firmware lockups */ +	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; +	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); +	schedule_delayed_work(&h->monitor_ctlr_work, +				h->heartbeat_sample_interval); +	return 0;  clean4:  	hpsa_free_sg_chain_blocks(h); @@ -4934,6 +7122,8 @@ clean4:  	free_irqs(h);  clean2:  clean1: +	if (h->lockup_detected) +		free_percpu(h->lockup_detected);  	kfree(h);  	return rc;  } @@ -4943,6 +7133,9 @@ static void hpsa_flush_cache(struct ctlr_info *h)  	char *flush_buf;  	struct CommandList *c; +	/* Don't bother trying to flush the cache if locked up */ +	if (unlikely(lockup_detected(h))) +		return;  	flush_buf = kzalloc(4, GFP_KERNEL);  	if (!flush_buf)  		return; @@ -4991,13 +7184,20 @@ static void hpsa_free_device_info(struct ctlr_info *h)  static void hpsa_remove_one(struct pci_dev *pdev)  {  	struct ctlr_info *h; +	unsigned long flags;  	if (pci_get_drvdata(pdev) == NULL) {  		dev_err(&pdev->dev, "unable to remove device\n");  		return;  	}  	h = pci_get_drvdata(pdev); -	stop_controller_lockup_detector(h); + +	/* Get rid of any controller monitoring work items */ +	spin_lock_irqsave(&h->lock, flags); +	h->remove_in_progress = 1; +	cancel_delayed_work(&h->monitor_ctlr_work); +	spin_unlock_irqrestore(&h->lock, flags); +  	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */  	hpsa_shutdown(pdev);  	iounmap(h->vaddr); @@ -5011,14 +7211,15 @@ static void hpsa_remove_one(struct pci_dev *pdev)  	pci_free_consistent(h->pdev,  		h->nr_cmds * sizeof(struct ErrorInfo),  		h->errinfo_pool, h->errinfo_pool_dhandle); -	pci_free_consistent(h->pdev, h->reply_pool_size, -		h->reply_pool, h->reply_pool_dhandle); +	hpsa_free_reply_queues(h);  	kfree(h->cmd_pool_bits);  	kfree(h->blockFetchTable); +	kfree(h->ioaccel1_blockFetchTable); +	kfree(h->ioaccel2_blockFetchTable);  	kfree(h->hba_inquiry_data);  	pci_disable_device(pdev);  	pci_release_regions(pdev); -	pci_set_drvdata(pdev, NULL); +	free_percpu(h->lockup_detected);  	kfree(h);  } @@ -5056,20 +7257,17 @@ static struct pci_driver hpsa_pci_driver = {   * bits of the command address.   
*/  static void  calc_bucket_map(int bucket[], int num_buckets, -	int nsgs, int *bucket_map) +	int nsgs, int min_blocks, int *bucket_map)  {  	int i, j, b, size; -	/* even a command with 0 SGs requires 4 blocks */ -#define MINIMUM_TRANSFER_BLOCKS 4 -#define NUM_BUCKETS 8  	/* Note, bucket_map must have nsgs+1 entries. */  	for (i = 0; i <= nsgs; i++) {  		/* Compute size of a command with i SG entries */ -		size = i + MINIMUM_TRANSFER_BLOCKS; +		size = i + min_blocks;  		b = num_buckets; /* Assume the biggest bucket */  		/* Find the bucket that is just big enough */ -		for (j = 0; j < 8; j++) { +		for (j = 0; j < num_buckets; j++) {  			if (bucket[j] >= size) {  				b = j;  				break; @@ -5080,10 +7278,16 @@ static void  calc_bucket_map(int bucket[], int num_buckets,  	}  } -static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) +static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)  {  	int i;  	unsigned long register_value; +	unsigned long transMethod = CFGTBL_Trans_Performant | +			(trans_support & CFGTBL_Trans_use_short_tags) | +				CFGTBL_Trans_enable_directed_msix | +			(trans_support & (CFGTBL_Trans_io_accel1 | +				CFGTBL_Trans_io_accel2)); +	struct access_method access = SA5_performant_access;  	/* This is a bit complicated.  There are 8 registers on  	 * the controller which we write to to tell it 8 different @@ -5103,6 +7307,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  	 * sizes for small commands, and fewer sizes for larger commands.  	 */  	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; +#define MIN_IOACCEL2_BFT_ENTRY 5 +#define HPSA_IOACCEL2_HEADER_SZ 4 +	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, +			13, 14, 15, 16, 17, 18, 19, +			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; +	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); +	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); +	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > +				 16 * MIN_IOACCEL2_BFT_ENTRY); +	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);  	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);  	/*  5 = 1 s/g entry or 4k  	 *  6 = 2 s/g entry or 8k @@ -5110,12 +7324,20 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  	 * 10 = 6 s/g entry or 24k  	 */ +	/* If the controller supports either ioaccel method then +	 * we can also use the RAID stack submit path that does not +	 * perform the superfluous readl() after each command submission. +	 */ +	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) +		access = SA5_performant_access_no_read; +  	/* Controller spec: zero out this buffer. 
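calc_bucket_map() now takes the minimum block count as a parameter instead of the hard-coded MINIMUM_TRANSFER_BLOCKS, and bounds the inner loop by num_buckets rather than the literal 8. A stand-alone, user-space rendering of the same loop shows what the table ends up holding; with the bft[] values from this function and min_blocks = 4, a command with 3 SG entries needs 7 blocks and maps to bucket 2 (size 8):

#include <stdio.h>

/* Same loop as the driver's calc_bucket_map(), lifted out for illustration. */
static void calc_bucket_map(int bucket[], int num_buckets,
		int nsgs, int min_blocks, int *bucket_map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + min_blocks;	/* blocks needed for i SG entries */
		b = num_buckets;	/* assume the biggest bucket */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;	/* first bucket that fits */
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, 36};	/* 36 = 32 SGs + 4 */
	int map[33];

	calc_bucket_map(bft, 8, 32, 4, map);
	printf("3 SG entries -> bucket %d (%d blocks)\n", map[3], bft[map[3]]);
	return 0;
}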
*/ -	memset(h->reply_pool, 0, h->reply_pool_size); +	for (i = 0; i < h->nreply_queues; i++) +		memset(h->reply_queue[i].head, 0, h->reply_queue_size);  	bft[7] = SG_ENTRIES_IN_CMD + 4;  	calc_bucket_map(bft, ARRAY_SIZE(bft), -				SG_ENTRIES_IN_CMD, h->blockFetchTable); +				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);  	for (i = 0; i < 8; i++)  		writel(bft[i], &h->transtable->BlockFetch[i]); @@ -5127,14 +7349,26 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  	for (i = 0; i < h->nreply_queues; i++) {  		writel(0, &h->transtable->RepQAddr[i].upper); -		writel(h->reply_pool_dhandle + -			(h->max_commands * sizeof(u64) * i), +		writel(h->reply_queue[i].busaddr,  			&h->transtable->RepQAddr[i].lower);  	} -	writel(CFGTBL_Trans_Performant | use_short_tags | -		CFGTBL_Trans_enable_directed_msix, -		&(h->cfgtable->HostWrite.TransportRequest)); +	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); +	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); +	/* +	 * enable outbound interrupt coalescing in accelerator mode; +	 */ +	if (trans_support & CFGTBL_Trans_io_accel1) { +		access = SA5_ioaccel_mode1_access; +		writel(10, &h->cfgtable->HostWrite.CoalIntDelay); +		writel(4, &h->cfgtable->HostWrite.CoalIntCount); +	} else { +		if (trans_support & CFGTBL_Trans_io_accel2) { +			access = SA5_ioaccel_mode2_access; +			writel(10, &h->cfgtable->HostWrite.CoalIntDelay); +			writel(4, &h->cfgtable->HostWrite.CoalIntCount); +		} +	}  	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);  	hpsa_wait_for_mode_change_ack(h);  	register_value = readl(&(h->cfgtable->TransportActive)); @@ -5144,13 +7378,160 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  		return;  	}  	/* Change the access methods to the performant access methods */ -	h->access = SA5_performant_access; -	h->transMethod = CFGTBL_Trans_Performant; +	h->access = access; +	h->transMethod = transMethod; + +	if (!((trans_support & CFGTBL_Trans_io_accel1) || +		(trans_support & CFGTBL_Trans_io_accel2))) +		return; + +	if (trans_support & CFGTBL_Trans_io_accel1) { +		/* Set up I/O accelerator mode */ +		for (i = 0; i < h->nreply_queues; i++) { +			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); +			h->reply_queue[i].current_entry = +				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); +		} +		bft[7] = h->ioaccel_maxsg + 8; +		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, +				h->ioaccel1_blockFetchTable); + +		/* initialize all reply queue entries to unused */ +		for (i = 0; i < h->nreply_queues; i++) +			memset(h->reply_queue[i].head, +				(u8) IOACCEL_MODE1_REPLY_UNUSED, +				h->reply_queue_size); + +		/* set all the constant fields in the accelerator command +		 * frames once at init time to save CPU cycles later. 
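Every transport reconfiguration in this function finishes with the same handshake: write CFGTBL_ChangeReq to SA5_DOORBELL, then wait for the firmware to acknowledge. hpsa_wait_for_mode_change_ack() is defined outside this hunk; a plausible sketch, assuming it polls the doorbell until the request bit clears (the retry bound and return convention here are stand-ins):

static int demo_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;

	/* Poll until firmware consumes the change request, or give up. */
	for (i = 0; i < 1000; i++) {
		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
			return 0;
		msleep(10);
	}
	return -ETIMEDOUT;
}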
+		 */ +		for (i = 0; i < h->nr_cmds; i++) { +			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; + +			cp->function = IOACCEL1_FUNCTION_SCSIIO; +			cp->err_info = (u32) (h->errinfo_pool_dhandle + +					(i * sizeof(struct ErrorInfo))); +			cp->err_info_len = sizeof(struct ErrorInfo); +			cp->sgl_offset = IOACCEL1_SGLOFFSET; +			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; +			cp->timeout_sec = 0; +			cp->ReplyQueue = 0; +			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | +						DIRECT_LOOKUP_BIT; +			cp->Tag.upper = 0; +			cp->host_addr.lower = +				(u32) (h->ioaccel_cmd_pool_dhandle + +					(i * sizeof(struct io_accel1_cmd))); +			cp->host_addr.upper = 0; +		} +	} else if (trans_support & CFGTBL_Trans_io_accel2) { +		u64 cfg_offset, cfg_base_addr_index; +		u32 bft2_offset, cfg_base_addr; +		int rc; + +		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, +			&cfg_base_addr_index, &cfg_offset); +		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); +		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; +		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, +				4, h->ioaccel2_blockFetchTable); +		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); +		BUILD_BUG_ON(offsetof(struct CfgTable, +				io_accel_request_size_offset) != 0xb8); +		h->ioaccel2_bft2_regs = +			remap_pci_mem(pci_resource_start(h->pdev, +					cfg_base_addr_index) + +					cfg_offset + bft2_offset, +					ARRAY_SIZE(bft2) * +					sizeof(*h->ioaccel2_bft2_regs)); +		for (i = 0; i < ARRAY_SIZE(bft2); i++) +			writel(bft2[i], &h->ioaccel2_bft2_regs[i]); +	} +	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); +	hpsa_wait_for_mode_change_ack(h); +} + +static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) +{ +	h->ioaccel_maxsg = +		readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); +	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) +		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; + +	/* Command structures must be aligned on a 128-byte boundary +	 * because the 7 lower bits of the address are used by the +	 * hardware. 
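The Tag pre-filled into each ioaccel1 frame above uses the driver's direct-lookup scheme: the pool index is baked into the tag at init time so the completion path can recover the command without a search. Using DIRECT_LOOKUP_SHIFT (5) and DIRECT_LOOKUP_BIT (0x10) from hpsa_cmd.h; the helper names are hypothetical:

static inline u32 demo_encode_direct_lookup(int cmd_index)
{
	return (cmd_index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
}

static inline int demo_decode_direct_lookup(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;	/* index into the command pool */
}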
+	 */ +	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % +			IOACCEL1_COMMANDLIST_ALIGNMENT); +	h->ioaccel_cmd_pool = +		pci_alloc_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), +			&(h->ioaccel_cmd_pool_dhandle)); + +	h->ioaccel1_blockFetchTable = +		kmalloc(((h->ioaccel_maxsg + 1) * +				sizeof(u32)), GFP_KERNEL); + +	if ((h->ioaccel_cmd_pool == NULL) || +		(h->ioaccel1_blockFetchTable == NULL)) +		goto clean_up; + +	memset(h->ioaccel_cmd_pool, 0, +		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); +	return 0; + +clean_up: +	if (h->ioaccel_cmd_pool) +		pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), +			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); +	kfree(h->ioaccel1_blockFetchTable); +	return 1; +} + +static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) +{ +	/* Allocate ioaccel2 mode command blocks and block fetch table */ + +	h->ioaccel_maxsg = +		readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); +	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) +		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; + +	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % +			IOACCEL2_COMMANDLIST_ALIGNMENT); +	h->ioaccel2_cmd_pool = +		pci_alloc_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), +			&(h->ioaccel2_cmd_pool_dhandle)); + +	h->ioaccel2_blockFetchTable = +		kmalloc(((h->ioaccel_maxsg + 1) * +				sizeof(u32)), GFP_KERNEL); + +	if ((h->ioaccel2_cmd_pool == NULL) || +		(h->ioaccel2_blockFetchTable == NULL)) +		goto clean_up; + +	memset(h->ioaccel2_cmd_pool, 0, +		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); +	return 0; + +clean_up: +	if (h->ioaccel2_cmd_pool) +		pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), +			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); +	kfree(h->ioaccel2_blockFetchTable); +	return 1;  }  static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)  {  	u32 trans_support; +	unsigned long transMethod = CFGTBL_Trans_Performant | +					CFGTBL_Trans_use_short_tags;  	int i;  	if (hpsa_simple_mode) @@ -5160,15 +7541,32 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)  	if (!(trans_support & PERFORMANT_MODE))  		return; -	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1; +	/* Check for I/O accelerator mode support */ +	if (trans_support & CFGTBL_Trans_io_accel1) { +		transMethod |= CFGTBL_Trans_io_accel1 | +				CFGTBL_Trans_enable_directed_msix; +		if (hpsa_alloc_ioaccel_cmd_and_bft(h)) +			goto clean_up; +	} else { +		if (trans_support & CFGTBL_Trans_io_accel2) { +				transMethod |= CFGTBL_Trans_io_accel2 | +				CFGTBL_Trans_enable_directed_msix; +		if (ioaccel2_alloc_cmds_and_bft(h)) +			goto clean_up; +		} +	} + +	h->nreply_queues = h->msix_vector > 0 ? 
h->msix_vector : 1;  	hpsa_get_max_perf_mode_cmds(h);  	/* Performant mode ring buffer and supporting data structures */ -	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; -	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, -				&(h->reply_pool_dhandle)); +	h->reply_queue_size = h->max_commands * sizeof(u64);  	for (i = 0; i < h->nreply_queues; i++) { -		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; +		h->reply_queue[i].head = pci_alloc_consistent(h->pdev, +						h->reply_queue_size, +						&(h->reply_queue[i].busaddr)); +		if (!h->reply_queue[i].head) +			goto clean_up;  		h->reply_queue[i].size = h->max_commands;  		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */  		h->reply_queue[i].current_entry = 0; @@ -5177,23 +7575,42 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)  	/* Need a block fetch table for performant mode */  	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *  				sizeof(u32)), GFP_KERNEL); - -	if ((h->reply_pool == NULL) -		|| (h->blockFetchTable == NULL)) +	if (!h->blockFetchTable)  		goto clean_up; -	hpsa_enter_performant_mode(h, -		trans_support & CFGTBL_Trans_use_short_tags); - +	hpsa_enter_performant_mode(h, trans_support);  	return;  clean_up: -	if (h->reply_pool) -		pci_free_consistent(h->pdev, h->reply_pool_size, -			h->reply_pool, h->reply_pool_dhandle); +	hpsa_free_reply_queues(h);  	kfree(h->blockFetchTable);  } +static int is_accelerated_cmd(struct CommandList *c) +{ +	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; +} + +static void hpsa_drain_accel_commands(struct ctlr_info *h) +{ +	struct CommandList *c = NULL; +	unsigned long flags; +	int accel_cmds_out; + +	do { /* wait for all outstanding commands to drain out */ +		accel_cmds_out = 0; +		spin_lock_irqsave(&h->lock, flags); +		list_for_each_entry(c, &h->cmpQ, list) +			accel_cmds_out += is_accelerated_cmd(c); +		list_for_each_entry(c, &h->reqQ, list) +			accel_cmds_out += is_accelerated_cmd(c); +		spin_unlock_irqrestore(&h->lock, flags); +		if (accel_cmds_out <= 0) +			break; +		msleep(100); +	} while (1); +} +  /*   *  This is it.  Register the PCI driver information for the cards we control   *  the OS will call our registered routines when it finds one of our cards. 
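hpsa_free_reply_queues() is called from the cleanup paths above but defined outside this hunk; a plausible sketch, assuming it simply undoes the per-queue pci_alloc_consistent() calls made in hpsa_put_ctlr_into_performant_mode():

static void demo_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev, h->reply_queue_size,
			h->reply_queue[i].head, h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
}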
@@ -5208,5 +7625,83 @@ static void __exit hpsa_cleanup(void)  	pci_unregister_driver(&hpsa_pci_driver);  } +static void __attribute__((unused)) verify_offsets(void) +{ +#define VERIFY_OFFSET(member, offset) \ +	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset) + +	VERIFY_OFFSET(structure_size, 0); +	VERIFY_OFFSET(volume_blk_size, 4); +	VERIFY_OFFSET(volume_blk_cnt, 8); +	VERIFY_OFFSET(phys_blk_shift, 16); +	VERIFY_OFFSET(parity_rotation_shift, 17); +	VERIFY_OFFSET(strip_size, 18); +	VERIFY_OFFSET(disk_starting_blk, 20); +	VERIFY_OFFSET(disk_blk_cnt, 28); +	VERIFY_OFFSET(data_disks_per_row, 36); +	VERIFY_OFFSET(metadata_disks_per_row, 38); +	VERIFY_OFFSET(row_cnt, 40); +	VERIFY_OFFSET(layout_map_count, 42); +	VERIFY_OFFSET(flags, 44); +	VERIFY_OFFSET(dekindex, 46); +	/* VERIFY_OFFSET(reserved, 48 */ +	VERIFY_OFFSET(data, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ +	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset) + +	VERIFY_OFFSET(IU_type, 0); +	VERIFY_OFFSET(direction, 1); +	VERIFY_OFFSET(reply_queue, 2); +	/* VERIFY_OFFSET(reserved1, 3);  */ +	VERIFY_OFFSET(scsi_nexus, 4); +	VERIFY_OFFSET(Tag, 8); +	VERIFY_OFFSET(cdb, 16); +	VERIFY_OFFSET(cciss_lun, 32); +	VERIFY_OFFSET(data_len, 40); +	VERIFY_OFFSET(cmd_priority_task_attr, 44); +	VERIFY_OFFSET(sg_count, 45); +	/* VERIFY_OFFSET(reserved3 */ +	VERIFY_OFFSET(err_ptr, 48); +	VERIFY_OFFSET(err_len, 56); +	/* VERIFY_OFFSET(reserved4  */ +	VERIFY_OFFSET(sg, 64); + +#undef VERIFY_OFFSET + +#define VERIFY_OFFSET(member, offset) \ +	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset) + +	VERIFY_OFFSET(dev_handle, 0x00); +	VERIFY_OFFSET(reserved1, 0x02); +	VERIFY_OFFSET(function, 0x03); +	VERIFY_OFFSET(reserved2, 0x04); +	VERIFY_OFFSET(err_info, 0x0C); +	VERIFY_OFFSET(reserved3, 0x10); +	VERIFY_OFFSET(err_info_len, 0x12); +	VERIFY_OFFSET(reserved4, 0x13); +	VERIFY_OFFSET(sgl_offset, 0x14); +	VERIFY_OFFSET(reserved5, 0x15); +	VERIFY_OFFSET(transfer_len, 0x1C); +	VERIFY_OFFSET(reserved6, 0x20); +	VERIFY_OFFSET(io_flags, 0x24); +	VERIFY_OFFSET(reserved7, 0x26); +	VERIFY_OFFSET(LUN, 0x34); +	VERIFY_OFFSET(control, 0x3C); +	VERIFY_OFFSET(CDB, 0x40); +	VERIFY_OFFSET(reserved8, 0x50); +	VERIFY_OFFSET(host_context_flags, 0x60); +	VERIFY_OFFSET(timeout_sec, 0x62); +	VERIFY_OFFSET(ReplyQueue, 0x64); +	VERIFY_OFFSET(reserved9, 0x65); +	VERIFY_OFFSET(Tag, 0x68); +	VERIFY_OFFSET(host_addr, 0x70); +	VERIFY_OFFSET(CISS_LUN, 0x78); +	VERIFY_OFFSET(SG, 0x78 + 8); +#undef VERIFY_OFFSET +} +  module_init(hpsa_init);  module_exit(hpsa_cleanup); diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index bc85e7244f4..24472cec7de 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h @@ -1,6 +1,6 @@  /*   *    Disk Array driver for HP Smart Array SAS controllers - *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.   
*   *    This program is free software; you can redistribute it and/or modify   *    it under the terms of the GNU General Public License as published by @@ -46,14 +46,65 @@ struct hpsa_scsi_dev_t {  	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */  	unsigned char model[16];        /* bytes 16-31 of inquiry data */  	unsigned char raid_level;	/* from inquiry page 0xC1 */ +	unsigned char volume_offline;	/* discovered via TUR or VPD */ +	u32 ioaccel_handle; +	int offload_config;		/* I/O accel RAID offload configured */ +	int offload_enabled;		/* I/O accel RAID offload enabled */ +	int offload_to_mirror;		/* Send next I/O accelerator RAID +					 * offload request to mirror drive +					 */ +	struct raid_map_data raid_map;	/* I/O accelerator RAID map */ +  }; -struct reply_pool { +struct reply_queue_buffer {  	u64 *head;  	size_t size;  	u8 wraparound;  	u32 current_entry; +	dma_addr_t busaddr; +}; + +#pragma pack(1) +struct bmic_controller_parameters { +	u8   led_flags; +	u8   enable_command_list_verification; +	u8   backed_out_write_drives; +	u16  stripes_for_parity; +	u8   parity_distribution_mode_flags; +	u16  max_driver_requests; +	u16  elevator_trend_count; +	u8   disable_elevator; +	u8   force_scan_complete; +	u8   scsi_transfer_mode; +	u8   force_narrow; +	u8   rebuild_priority; +	u8   expand_priority; +	u8   host_sdb_asic_fix; +	u8   pdpi_burst_from_host_disabled; +	char software_name[64]; +	char hardware_name[32]; +	u8   bridge_revision; +	u8   snapshot_priority; +	u32  os_specific; +	u8   post_prompt_timeout; +	u8   automatic_drive_slamming; +	u8   reserved1; +	u8   nvram_flags; +#define HBA_MODE_ENABLED_FLAG (1 << 3) +	u8   cache_nvram_flags; +	u8   drive_config_flags; +	u16  reserved2; +	u8   temp_warning_level; +	u8   temp_shutdown_level; +	u8   temp_condition_reset; +	u8   max_coalesce_commands; +	u32  max_coalesce_delay; +	u8   orca_password[4]; +	u8   access_id[16]; +	u8   reserved[356];  }; +#pragma pack()  struct ctlr_info {  	int	ctlr; @@ -66,11 +117,8 @@ struct ctlr_info {  	int 	nr_cmds; /* Number of commands allowed on this controller */  	struct CfgTable __iomem *cfgtable;  	int	interrupts_enabled; -	int	major;  	int 	max_commands;  	int	commands_outstanding; -	int 	max_outstanding; /* Debug */ -	int	usage_count;  /* number of opens all all minor devices */  #	define PERF_MODE_INT	0  #	define DOORBELL_INT	1  #	define SIMPLE_MODE_INT	2 @@ -80,6 +128,7 @@ struct ctlr_info {  	unsigned int msi_vector;  	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */  	struct access_method access; +	char hba_mode_enabled;  	/* queue and queue Info */  	struct list_head reqQ; @@ -95,6 +144,10 @@ struct ctlr_info {  	/* pointers to command and error info pool */  	struct CommandList 	*cmd_pool;  	dma_addr_t		cmd_pool_dhandle; +	struct io_accel1_cmd	*ioaccel_cmd_pool; +	dma_addr_t		ioaccel_cmd_pool_dhandle; +	struct io_accel2_cmd	*ioaccel2_cmd_pool; +	dma_addr_t		ioaccel2_cmd_pool_dhandle;  	struct ErrorInfo 	*errinfo_pool;  	dma_addr_t		errinfo_pool_dhandle;  	unsigned long  		*cmd_pool_bits; @@ -114,23 +167,35 @@ struct ctlr_info {  	struct TransTable_struct *transtable;  	unsigned long transMethod; +	/* cap concurrent passthrus at some reasonable maximum */ +#define HPSA_MAX_CONCURRENT_PASSTHRUS (20) +	spinlock_t passthru_count_lock; /* protects passthru_count */ +	int passthru_count; +  	/*  	 * Performant mode completion buffers  	 */ -	u64 *reply_pool; -	size_t reply_pool_size; -	struct reply_pool reply_queue[MAX_REPLY_QUEUES]; +	size_t reply_queue_size; +	
struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];  	u8 nreply_queues; -	dma_addr_t reply_pool_dhandle;  	u32 *blockFetchTable; +	u32 *ioaccel1_blockFetchTable; +	u32 *ioaccel2_blockFetchTable; +	u32 *ioaccel2_bft2_regs;  	unsigned char *hba_inquiry_data; +	u32 driver_support; +	u32 fw_support; +	int ioaccel_support; +	int ioaccel_maxsg;  	u64 last_intr_timestamp;  	u32 last_heartbeat;  	u64 last_heartbeat_timestamp;  	u32 heartbeat_sample_interval;  	atomic_t firmware_flash_in_progress; -	u32 lockup_detected; -	struct list_head lockup_list; +	u32 *lockup_detected; +	struct delayed_work monitor_ctlr_work; +	int remove_in_progress; +	u32 fifo_recently_full;  	/* Address of h->q[x] is passed to intr handler to know which queue */  	u8 q[MAX_REPLY_QUEUES];  	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */ @@ -154,7 +219,33 @@ struct ctlr_info {  #define HPSATMF_LOG_QRY_TASK    (1 << 23)  #define HPSATMF_LOG_QRY_TSET    (1 << 24)  #define HPSATMF_LOG_QRY_ASYNC   (1 << 25) +	u32 events; +#define CTLR_STATE_CHANGE_EVENT				(1 << 0) +#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1) +#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4) +#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5) +#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6) +#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30) +#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31) + +#define RESCAN_REQUIRED_EVENT_BITS \ +		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \ +		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \ +		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \ +		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \ +		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE) +	spinlock_t offline_device_lock; +	struct list_head offline_device_list; +	int	acciopath_status; +	int	drv_req_rescan;	/* flag for driver to request rescan event */ +	int	raid_offload_debug;  }; + +struct offline_device_entry { +	unsigned char scsi3addr[8]; +	struct list_head offline_list; +}; +  #define HPSA_ABORT_MSG 0  #define HPSA_DEVICE_RESET_MSG 1  #define HPSA_RESET_TYPE_CONTROLLER 0x00 @@ -235,18 +326,39 @@ struct ctlr_info {  #define HPSA_INTR_ON 	1  #define HPSA_INTR_OFF	0 + +/* + * Inbound Post Queue offsets for IO Accelerator Mode 2 + */ +#define IOACCEL2_INBOUND_POSTQ_32	0x48 +#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0 +#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4 +  /*  	Send the command to the hardware  */  static void SA5_submit_command(struct ctlr_info *h,  	struct CommandList *c)  { -	dev_dbg(&h->pdev->dev, "Sending %x, tag = %x\n", c->busaddr, -		c->Header.Tag.lower);  	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);  	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);  } +static void SA5_submit_command_no_read(struct ctlr_info *h, +	struct CommandList *c) +{ +	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); +} + +static void SA5_submit_command_ioaccel2(struct ctlr_info *h, +	struct CommandList *c) +{ +	if (c->cmd_type == CMD_IOACCEL2) +		writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32); +	else +		writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); +} +  /*   *  This card is the opposite of the other cards.   *   0 turns interrupts on... @@ -282,7 +394,7 @@ static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)  static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)  { -	struct reply_pool *rq = &h->reply_queue[q]; +	struct reply_queue_buffer *rq = &h->reply_queue[q];  	unsigned long flags, register_value = FIFO_EMPTY;  	/* msi auto clears the interrupt pending bit. 
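The submit variants above all plug into the same ops table: struct access_method bundles the submit/interrupt-mask/fifo/pending/completed function pointers, and the transport setup code swaps the whole table at init time. A sketch of the dispatch side, assuming the caller shape the driver uses elsewhere:

static void demo_start_io(struct ctlr_info *h, struct CommandList *c)
{
	/* Mode-specific submit: plain SA5, no-read, or ioaccel2 posting. */
	h->access.submit_command(h, c);
}

static bool demo_reply_ready(struct ctlr_info *h)
{
	return h->access.intr_pending(h);	/* mode-specific status check */
}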
*/ @@ -361,7 +473,6 @@ static bool SA5_intr_pending(struct ctlr_info *h)  {  	unsigned long register_value  =  		readl(h->vaddr + SA5_INTR_STATUS); -	dev_dbg(&h->pdev->dev, "intr_pending %lx\n", register_value);  	return register_value & SA5_INTR_PENDING;  } @@ -380,6 +491,50 @@ static bool SA5_performant_intr_pending(struct ctlr_info *h)  	return register_value & SA5_OUTDB_STATUS_PERF_BIT;  } +#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100 + +static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h) +{ +	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); + +	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ? +		true : false; +} + +#define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0 +#define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8 +#define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC +#define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL + +static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) +{ +	u64 register_value; +	struct reply_queue_buffer *rq = &h->reply_queue[q]; +	unsigned long flags; + +	BUG_ON(q >= h->nreply_queues); + +	register_value = rq->head[rq->current_entry]; +	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) { +		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; +		if (++rq->current_entry == rq->size) +			rq->current_entry = 0; +		/* +		 * @todo +		 * +		 * Don't really need to write the new index after each command, +		 * but with current driver design this is easiest. +		 */ +		wmb(); +		writel((q << 24) | rq->current_entry, h->vaddr + +				IOACCEL_MODE1_CONSUMER_INDEX); +		spin_lock_irqsave(&h->lock, flags); +		h->commands_outstanding--; +		spin_unlock_irqrestore(&h->lock, flags); +	} +	return (unsigned long) register_value; +} +  static struct access_method SA5_access = {  	SA5_submit_command,  	SA5_intr_mask, @@ -388,6 +543,22 @@ static struct access_method SA5_access = {  	SA5_completed,  }; +static struct access_method SA5_ioaccel_mode1_access = { +	SA5_submit_command, +	SA5_performant_intr_mask, +	SA5_fifo_full, +	SA5_ioaccel_mode1_intr_pending, +	SA5_ioaccel_mode1_completed, +}; + +static struct access_method SA5_ioaccel_mode2_access = { +	SA5_submit_command_ioaccel2, +	SA5_performant_intr_mask, +	SA5_fifo_full, +	SA5_performant_intr_pending, +	SA5_performant_completed, +}; +  static struct access_method SA5_performant_access = {  	SA5_submit_command,  	SA5_performant_intr_mask, @@ -396,6 +567,14 @@ static struct access_method SA5_performant_access = {  	SA5_performant_completed,  }; +static struct access_method SA5_performant_access_no_read = { +	SA5_submit_command_no_read, +	SA5_performant_intr_mask, +	SA5_fifo_full, +	SA5_performant_intr_pending, +	SA5_performant_completed, +}; +  struct board_type {  	u32	board_id;  	char	*product_name; diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index a894f2eca7a..b5125dc3143 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h @@ -1,6 +1,6 @@  /*   *    Disk Array driver for HP Smart Array SAS controllers - *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.   
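Distilling the mode-1 reply ring from SA5_ioaccel_mode1_completed() above: the hardware produces 64-bit entries in place, and the driver consumes until it sees the all-ones sentinel, re-poisoning each slot and advancing a wrapping index. A hedged reduction with the locking and consumer-index writeback omitted:

static u64 demo_consume_one(u64 *ring, u32 *cur, u32 size)
{
	u64 v = ring[*cur];

	if (v == IOACCEL_MODE1_REPLY_UNUSED)
		return IOACCEL_MODE1_REPLY_UNUSED;	/* ring is empty */
	ring[*cur] = IOACCEL_MODE1_REPLY_UNUSED;	/* re-poison the slot */
	if (++(*cur) == size)
		*cur = 0;				/* wrap around */
	return v;
}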
*   *    This program is free software; you can redistribute it and/or modify   *    it under the terms of the GNU General Public License as published by @@ -25,6 +25,7 @@  #define SENSEINFOBYTES          32 /* may vary between hbas */  #define SG_ENTRIES_IN_CMD	32 /* Max SG entries excluding chain blocks */  #define HPSA_SG_CHAIN		0x80000000 +#define HPSA_SG_LAST		0x40000000  #define MAXREPLYQS              256  /* Command Status value */ @@ -41,6 +42,8 @@  #define CMD_UNSOLICITED_ABORT   0x000A  #define CMD_TIMEOUT             0x000B  #define CMD_UNABORTABLE		0x000C +#define CMD_IOACCEL_DISABLED	0x000E +  /* Unit Attentions ASC's as defined for the MSA2012sa */  #define POWER_OR_RESET			0x29 @@ -79,8 +82,9 @@  #define ATTR_ACA                0x07  /* cdb type */ -#define TYPE_CMD				0x00 -#define TYPE_MSG				0x01 +#define TYPE_CMD		0x00 +#define TYPE_MSG		0x01 +#define TYPE_IOACCEL2_CMD	0x81 /* 0x81 is not used by hardware */  /* Message Types  */  #define HPSA_TASK_MANAGEMENT    0x00 @@ -125,9 +129,12 @@  #define CFGTBL_AccCmds          0x00000001l  #define DOORBELL_CTLR_RESET	0x00000004l  #define DOORBELL_CTLR_RESET2	0x00000020l +#define DOORBELL_CLEAR_EVENTS	0x00000040l  #define CFGTBL_Trans_Simple     0x00000002l  #define CFGTBL_Trans_Performant 0x00000004l +#define CFGTBL_Trans_io_accel1	0x00000080l +#define CFGTBL_Trans_io_accel2	0x00000100l  #define CFGTBL_Trans_use_short_tags 0x20000000l  #define CFGTBL_Trans_enable_directed_msix (1 << 30) @@ -135,6 +142,28 @@  #define CFGTBL_BusType_Ultra3   0x00000002l  #define CFGTBL_BusType_Fibre1G  0x00000100l  #define CFGTBL_BusType_Fibre2G  0x00000200l + +/* VPD Inquiry types */ +#define HPSA_VPD_SUPPORTED_PAGES        0x00 +#define HPSA_VPD_LV_DEVICE_GEOMETRY     0xC1 +#define HPSA_VPD_LV_IOACCEL_STATUS      0xC2 +#define HPSA_VPD_LV_STATUS		0xC3 +#define HPSA_VPD_HEADER_SZ              4 + +/* Logical volume states */ +#define HPSA_VPD_LV_STATUS_UNSUPPORTED			0xff +#define HPSA_LV_OK                                      0x0 +#define HPSA_LV_UNDERGOING_ERASE			0x0F +#define HPSA_LV_UNDERGOING_RPI				0x12 +#define HPSA_LV_PENDING_RPI				0x13 +#define HPSA_LV_ENCRYPTED_NO_KEY			0x14 +#define HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER	0x15 +#define HPSA_LV_UNDERGOING_ENCRYPTION			0x16 +#define HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING		0x17 +#define HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER	0x18 +#define HPSA_LV_PENDING_ENCRYPTION			0x19 +#define HPSA_LV_PENDING_ENCRYPTION_REKEYING		0x1A +  struct vals32 {  	u32   lower;  	u32   upper; @@ -162,17 +191,68 @@ struct InquiryData {  #define HPSA_REPORT_LOG 0xc2    /* Report Logical LUNs */  #define HPSA_REPORT_PHYS 0xc3   /* Report Physical LUNs */ +#define HPSA_REPORT_PHYS_EXTENDED 0x02 +#define HPSA_CISS_READ	0xc0	/* CISS Read */ +#define HPSA_GET_RAID_MAP 0xc8	/* CISS Get RAID Layout Map */ + +#define RAID_MAP_MAX_ENTRIES   256 + +struct raid_map_disk_data { +	u32   ioaccel_handle;         /**< Handle to access this disk via the +					*  I/O accelerator */ +	u8    xor_mult[2];            /**< XOR multipliers for this position, +					*  valid for data disks only */ +	u8    reserved[2]; +}; + +struct raid_map_data { +	u32   structure_size;		/* Size of entire structure in bytes */ +	u32   volume_blk_size;		/* bytes / block in the volume */ +	u64   volume_blk_cnt;		/* logical blocks on the volume */ +	u8    phys_blk_shift;		/* Shift factor to convert between +					 * units of logical blocks and physical +					 * disk blocks */ +	u8    parity_rotation_shift;	/* Shift factor to convert between 
units +					 * of logical stripes and physical +					 * stripes */ +	u16   strip_size;		/* blocks used on each disk / stripe */ +	u64   disk_starting_blk;	/* First disk block used in volume */ +	u64   disk_blk_cnt;		/* disk blocks used by volume / disk */ +	u16   data_disks_per_row;	/* data disk entries / row in the map */ +	u16   metadata_disks_per_row;	/* mirror/parity disk entries / row +					 * in the map */ +	u16   row_cnt;			/* rows in each layout map */ +	u16   layout_map_count;		/* layout maps (1 map per mirror/parity +					 * group) */ +	u16   flags;			/* Bit 0 set if encryption enabled */ +#define RAID_MAP_FLAG_ENCRYPT_ON  0x01 +	u16   dekindex;			/* Data encryption key index. */ +	u8    reserved[16]; +	struct raid_map_disk_data data[RAID_MAP_MAX_ENTRIES]; +}; +  struct ReportLUNdata {  	u8 LUNListLength[4]; -	u32 reserved; +	u8 extended_response_flag; +	u8 reserved[3];  	u8 LUN[HPSA_MAX_LUN][8];  }; +struct ext_report_lun_entry { +	u8 lunid[8]; +	u8 wwid[8]; +	u8 device_type; +	u8 device_flags; +	u8 lun_count; /* multi-lun device, how many luns */ +	u8 redundant_paths; +	u32 ioaccel_handle; /* ioaccel1 only uses lower 16 bits */ +}; +  struct ReportExtendedLUNdata {  	u8 LUNListLength[4];  	u8 extended_response_flag;  	u8 reserved[3]; -	u8 LUN[HPSA_MAX_LUN][24]; +	struct ext_report_lun_entry LUN[HPSA_MAX_LUN];  };  struct SenseSubsystem_info { @@ -187,6 +267,7 @@ struct SenseSubsystem_info {  #define BMIC_CACHE_FLUSH 0xc2  #define HPSA_CACHE_FLUSH 0x01	/* C2 was already being used by HPSA */  #define BMIC_FLASH_FIRMWARE 0xF7 +#define BMIC_SENSE_CONTROLLER_PARAMETERS 0x64  /* Command List Structure */  union SCSI3Addr { @@ -283,6 +364,8 @@ struct ErrorInfo {  /* Command types */  #define CMD_IOCTL_PEND  0x01  #define CMD_SCSI	0x03 +#define CMD_IOACCEL1	0x04 +#define CMD_IOACCEL2	0x05  #define DIRECT_LOOKUP_SHIFT 5  #define DIRECT_LOOKUP_BIT 0x10 @@ -302,6 +385,7 @@ struct ctlr_info; /* defined in hpsa.h */   *        or a bus address.   */ +#define COMMANDLIST_ALIGNMENT 128  struct CommandList {  	struct CommandListHeader Header;  	struct RequestBlock      Request; @@ -314,29 +398,173 @@ struct CommandList {  	int			   cmd_type;  	long			   cmdindex;  	struct list_head list; -	struct request *rq;  	struct completion *waiting;  	void   *scsi_cmd; +} __aligned(COMMANDLIST_ALIGNMENT); + +/* Max S/G elements in I/O accelerator command */ +#define IOACCEL1_MAXSGENTRIES           24 +#define IOACCEL2_MAXSGENTRIES		28 -/* on 64 bit architectures, to get this to be 32-byte-aligned - * it so happens we need PAD_64 bytes of padding, on 32 bit systems, - * we need PAD_32 bytes of padding (see below).   This does that. - * If it happens that 64 bit and 32 bit systems need different - * padding, PAD_32 and PAD_64 can be set independently, and. - * the code below will do the right thing. +/* + * Structure for I/O accelerator (mode 1) commands. + * Note that this structure must be 128-byte aligned in size.   
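Both accelerator command formats pair a 128-byte __aligned() attribute with pools carved out of pci_alloc_consistent() memory: the attribute rounds sizeof() up to a multiple of the alignment, so if the pool base is aligned (DMA-coherent allocations are at least page-aligned), every element &pool[i] stays aligned too. A self-contained restatement of that size rule; the demo_* names are illustrative:

#include <stddef.h>

#define DEMO_ALIGN 128
struct demo_cmd {
	unsigned char payload[200];
} __attribute__((aligned(DEMO_ALIGN)));	/* sizeof() rounds up to 256 */

_Static_assert(sizeof(struct demo_cmd) % DEMO_ALIGN == 0,
	       "aligned attribute must round the size up");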
*/ -#define IS_32_BIT ((8 - sizeof(long))/4) -#define IS_64_BIT (!IS_32_BIT) -#define PAD_32 (4) -#define PAD_64 (4) -#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) -	u8 pad[COMMANDLIST_PAD]; +#define IOACCEL1_COMMANDLIST_ALIGNMENT 128 +struct io_accel1_cmd { +	u16 dev_handle;			/* 0x00 - 0x01 */ +	u8  reserved1;			/* 0x02 */ +	u8  function;			/* 0x03 */ +	u8  reserved2[8];		/* 0x04 - 0x0B */ +	u32 err_info;			/* 0x0C - 0x0F */ +	u8  reserved3[2];		/* 0x10 - 0x11 */ +	u8  err_info_len;		/* 0x12 */ +	u8  reserved4;			/* 0x13 */ +	u8  sgl_offset;			/* 0x14 */ +	u8  reserved5[7];		/* 0x15 - 0x1B */ +	u32 transfer_len;		/* 0x1C - 0x1F */ +	u8  reserved6[4];		/* 0x20 - 0x23 */ +	u16 io_flags;			/* 0x24 - 0x25 */ +	u8  reserved7[14];		/* 0x26 - 0x33 */ +	u8  LUN[8];			/* 0x34 - 0x3B */ +	u32 control;			/* 0x3C - 0x3F */ +	u8  CDB[16];			/* 0x40 - 0x4F */ +	u8  reserved8[16];		/* 0x50 - 0x5F */ +	u16 host_context_flags;		/* 0x60 - 0x61 */ +	u16 timeout_sec;		/* 0x62 - 0x63 */ +	u8  ReplyQueue;			/* 0x64 */ +	u8  reserved9[3];		/* 0x65 - 0x67 */ +	struct vals32 Tag;		/* 0x68 - 0x6F */ +	struct vals32 host_addr;	/* 0x70 - 0x77 */ +	u8  CISS_LUN[8];		/* 0x78 - 0x7F */ +	struct SGDescriptor SG[IOACCEL1_MAXSGENTRIES]; +} __aligned(IOACCEL1_COMMANDLIST_ALIGNMENT); + +#define IOACCEL1_FUNCTION_SCSIIO        0x00 +#define IOACCEL1_SGLOFFSET              32 + +#define IOACCEL1_IOFLAGS_IO_REQ         0x4000 +#define IOACCEL1_IOFLAGS_CDBLEN_MASK    0x001F +#define IOACCEL1_IOFLAGS_CDBLEN_MAX     16 + +#define IOACCEL1_CONTROL_NODATAXFER     0x00000000 +#define IOACCEL1_CONTROL_DATA_OUT       0x01000000 +#define IOACCEL1_CONTROL_DATA_IN        0x02000000 +#define IOACCEL1_CONTROL_TASKPRIO_MASK  0x00007800 +#define IOACCEL1_CONTROL_TASKPRIO_SHIFT 11 +#define IOACCEL1_CONTROL_SIMPLEQUEUE    0x00000000 +#define IOACCEL1_CONTROL_HEADOFQUEUE    0x00000100 +#define IOACCEL1_CONTROL_ORDEREDQUEUE   0x00000200 +#define IOACCEL1_CONTROL_ACA            0x00000400 + +#define IOACCEL1_HCFLAGS_CISS_FORMAT    0x0013 + +#define IOACCEL1_BUSADDR_CMDTYPE        0x00000060 + +struct ioaccel2_sg_element { +	u64 address; +	u32 length; +	u8 reserved[3]; +	u8 chain_indicator; +#define IOACCEL2_CHAIN 0x80 +}; + +/* + * SCSI Response Format structure for IO Accelerator Mode 2 + */ +struct io_accel2_scsi_response { +	u8 IU_type; +#define IOACCEL2_IU_TYPE_SRF			0x60 +	u8 reserved1[3]; +	u8 req_id[4];		/* request identifier */ +	u8 reserved2[4]; +	u8 serv_response;		/* service response */ +#define IOACCEL2_SERV_RESPONSE_COMPLETE		0x000 +#define IOACCEL2_SERV_RESPONSE_FAILURE		0x001 +#define IOACCEL2_SERV_RESPONSE_TMF_COMPLETE	0x002 +#define IOACCEL2_SERV_RESPONSE_TMF_SUCCESS	0x003 +#define IOACCEL2_SERV_RESPONSE_TMF_REJECTED	0x004 +#define IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN	0x005 +	u8 status;			/* status */ +#define IOACCEL2_STATUS_SR_TASK_COMP_GOOD	0x00 +#define IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND	0x02 +#define IOACCEL2_STATUS_SR_TASK_COMP_BUSY	0x08 +#define IOACCEL2_STATUS_SR_TASK_COMP_RES_CON	0x18 +#define IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL	0x28 +#define IOACCEL2_STATUS_SR_TASK_COMP_ABORTED	0x40 +#define IOACCEL2_STATUS_SR_IOACCEL_DISABLED	0x0E +	u8 data_present;		/* low 2 bits */ +#define IOACCEL2_NO_DATAPRESENT		0x000 +#define IOACCEL2_RESPONSE_DATAPRESENT	0x001 +#define IOACCEL2_SENSE_DATA_PRESENT	0x002 +#define IOACCEL2_RESERVED		0x003 +	u8 sense_data_len;		/* sense/response data length */ +	u8 resid_cnt[4];		/* residual count */ +	u8 sense_data_buff[32];		/* sense/response data buffer */ +}; + +/* + * 
Structure for I/O accelerator (mode 2 or m2) commands. + * Note that this structure must be 128-byte aligned in size. + */ +#define IOACCEL2_COMMANDLIST_ALIGNMENT 128 +struct io_accel2_cmd { +	u8  IU_type;			/* IU Type */ +	u8  direction;			/* direction, memtype, and encryption */ +#define IOACCEL2_DIRECTION_MASK		0x03 /* bits 0,1: direction  */ +#define IOACCEL2_DIRECTION_MEMTYPE_MASK	0x04 /* bit 2: memtype source/dest */ +					     /*     0b=PCIe, 1b=DDR */ +#define IOACCEL2_DIRECTION_ENCRYPT_MASK	0x08 /* bit 3: encryption flag */ +					     /*     0=off, 1=on */ +	u8  reply_queue;		/* Reply Queue ID */ +	u8  reserved1;			/* Reserved */ +	u32 scsi_nexus;			/* Device Handle */ +	u32 Tag;			/* cciss tag, lower 4 bytes only */ +	u32 tweak_lower;		/* Encryption tweak, lower 4 bytes */ +	u8  cdb[16];			/* SCSI Command Descriptor Block */ +	u8  cciss_lun[8];		/* 8 byte SCSI address */ +	u32 data_len;			/* Total bytes to transfer */ +	u8  cmd_priority_task_attr;	/* priority and task attrs */ +#define IOACCEL2_PRIORITY_MASK 0x78 +#define IOACCEL2_ATTR_MASK 0x07 +	u8  sg_count;			/* Number of sg elements */ +	u16 dekindex;			/* Data encryption key index */ +	u64 err_ptr;			/* Error Pointer */ +	u32 err_len;			/* Error Length*/ +	u32 tweak_upper;		/* Encryption tweak, upper 4 bytes */ +	struct ioaccel2_sg_element sg[IOACCEL2_MAXSGENTRIES]; +	struct io_accel2_scsi_response error_data; +} __aligned(IOACCEL2_COMMANDLIST_ALIGNMENT); + +/* + * defines for Mode 2 command struct + * FIXME: this can't be all I need mfm + */ +#define IOACCEL2_IU_TYPE	0x40 +#define IOACCEL2_IU_TMF_TYPE	0x41 +#define IOACCEL2_DIR_NO_DATA	0x00 +#define IOACCEL2_DIR_DATA_IN	0x01 +#define IOACCEL2_DIR_DATA_OUT	0x02 +/* + * SCSI Task Management Request format for Accelerator Mode 2 + */ +struct hpsa_tmf_struct { +	u8 iu_type;		/* Information Unit Type */ +	u8 reply_queue;		/* Reply Queue ID */ +	u8 tmf;			/* Task Management Function */ +	u8 reserved1;		/* byte 3 Reserved */ +	u32 it_nexus;		/* SCSI I-T Nexus */ +	u8 lun_id[8];		/* LUN ID for TMF request */ +	struct vals32 Tag;	/* cciss tag associated w/ request */ +	struct vals32 abort_tag;/* cciss tag of SCSI cmd or task to abort */ +	u64 error_ptr;		/* Error Pointer */ +	u32 error_len;		/* Error Length */  };  /* Configuration Table Structure */  struct HostWrite {  	u32 TransportRequest; -	u32 Reserved; +	u32 command_pool_addr_hi;  	u32 CoalIntDelay;  	u32 CoalIntCount;  }; @@ -344,6 +572,9 @@ struct HostWrite {  #define SIMPLE_MODE     0x02  #define PERFORMANT_MODE 0x04  #define MEMQ_MODE       0x08 +#define IOACCEL_MODE_1  0x80 + +#define DRIVER_SUPPORT_UA_ENABLE        0x00000001  struct CfgTable {  	u8            Signature[4]; @@ -356,7 +587,9 @@ struct CfgTable {  	u32           TransMethodOffset;  	u8            ServerName[16];  	u32           HeartBeat; -	u32           SCSI_Prefetch; +	u32           driver_support; +#define			ENABLE_SCSI_PREFETCH 0x100 +#define			ENABLE_UNIT_ATTN 0x01  	u32	 	MaxScatterGatherElements;  	u32		MaxLogicalUnits;  	u32		MaxPhysicalDevices; @@ -371,8 +604,18 @@ struct CfgTable {  	u32		misc_fw_support; /* offset 0x78 */  #define			MISC_FW_DOORBELL_RESET (0x02)  #define			MISC_FW_DOORBELL_RESET2 (0x010) +#define			MISC_FW_RAID_OFFLOAD_BASIC (0x020) +#define			MISC_FW_EVENT_NOTIFY (0x080)  	u8		driver_version[32]; - +	u32             max_cached_write_size; +	u8              driver_scratchpad[16]; +	u32             max_error_info_length; +	u32		io_accel_max_embedded_sg_count; +	u32		io_accel_request_size_offset; +	u32		event_notify; +#define 
HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE (1 << 30) +#define HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE (1 << 31) +	u32		clear_event_notify;  };  #define NUM_BLOCKFETCH_ENTRIES 8 @@ -382,7 +625,7 @@ struct TransTable_struct {  	u32            RepQCount;  	u32            RepQCtrAddrLow32;  	u32            RepQCtrAddrHigh32; -#define MAX_REPLY_QUEUES 8 +#define MAX_REPLY_QUEUES 64  	struct vals32  RepQAddr[MAX_REPLY_QUEUES];  }; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 23f5ba5e647..8dd47689d58 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -4515,7 +4515,7 @@ static int ibmvfc_work(void *data)  	struct ibmvfc_host *vhost = data;  	int rc; -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  	while (1) {  		rc = wait_event_interruptible(vhost->work_wait_q, diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index fa764406df6..7b23f21f22f 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)  	if (crq->valid & 0x80) {  		if (++queue->cur == queue->size)  			queue->cur = 0; + +		/* Ensure the read of the valid bit occurs before reading any +		 * other bits of the CRQ entry +		 */ +		rmb();  	} else  		crq = NULL;  	spin_unlock_irqrestore(&queue->lock, flags); @@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,  {  	struct vio_dev *vdev = to_vio_dev(hostdata->dev); +	/* +	 * Ensure the command buffer is flushed to memory before handing it +	 * over to the VIOS to prevent it from fetching any stale data. +	 */ +	mb();  	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);  } @@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)  				       evt->hostdata->dev);  			if (evt->cmnd_done)  				evt->cmnd_done(evt->cmnd); -		} else if (evt->done) +		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT && +			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)  			evt->done(evt);  		free_event_struct(&evt->hostdata->pool, evt);  		spin_lock_irqsave(hostdata->host->host_lock, flags); @@ -2213,7 +2224,7 @@ static int ibmvscsi_work(void *data)  	struct ibmvscsi_host_data *hostdata = data;  	int rc; -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  	while (1) {  		rc = wait_event_interruptible(hostdata->work_wait_q, diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index bf9eca84516..56f8a861ed7 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c @@ -589,7 +589,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)  	}  	err = request_irq(vport->dma_dev->irq, &ibmvstgt_interrupt, -			  IRQF_DISABLED, "ibmvstgt", target); +			  0, "ibmvstgt", target);  	if (err)  		goto req_irq_failed; diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index bf028218ac3..b1c4d831137 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c @@ -2015,7 +2015,7 @@ static int __init in2000_detect(struct scsi_host_template * tpnt)  		write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */  		write1_io(0, IO_INTR_MASK);	/* allow all ints */  		x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT]; -		if (request_irq(x, in2000_intr, IRQF_DISABLED, "in2000", instance)) { +		if (request_irq(x, in2000_intr, 0, "in2000", instance)) {  			
printk("in2000_detect: Unable to allocate IRQ.\n");  			detect_count--;  			continue; diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 280d5af113d..e5dae7b54d9 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -2931,7 +2931,7 @@ static int initio_probe_one(struct pci_dev *pdev,  	shost->base = host->addr;  	shost->sg_tablesize = TOTAL_SG_ENTRY; -	error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost); +	error = request_irq(pdev->irq, i91u_intr, IRQF_SHARED, "i91u", shost);  	if (error < 0) {  		printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);  		goto out_free_scbs; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 36ac1c34ce9..924b0ba74df 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -220,7 +220,7 @@ module_param_named(max_devs, ipr_max_devs, int, 0);  MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "  		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");  module_param_named(number_of_msix, ipr_number_of_msix, int, 0); -MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 5).  (default:2)"); +MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:2)");  MODULE_LICENSE("GPL");  MODULE_VERSION(IPR_DRIVER_VERSION); @@ -1143,6 +1143,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,  	res->add_to_ml = 0;  	res->del_from_ml = 0;  	res->resetting_device = 0; +	res->reset_occurred = 0;  	res->sdev = NULL;  	res->sata_port = NULL; @@ -2367,6 +2368,42 @@ static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,  }  /** + * ipr_log_sis64_device_error - Log a cache error. + * @ioa_cfg:	ioa config struct + * @hostrcb:	hostrcb struct + * + * Return value: + * 	none + **/ +static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg, +					 struct ipr_hostrcb *hostrcb) +{ +	struct ipr_hostrcb_type_21_error *error; +	char buffer[IPR_MAX_RES_PATH_LENGTH]; + +	error = &hostrcb->hcam.u.error64.u.type_21_error; + +	ipr_err("-----Failing Device Information-----\n"); +	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n", +		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]), +		 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3])); +	ipr_err("Device Resource Path: %s\n", +		__ipr_format_res_path(error->res_path, +				      buffer, sizeof(buffer))); +	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0'; +	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0'; +	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc); +	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc); +	ipr_err("SCSI Sense Data:\n"); +	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data)); +	ipr_err("SCSI Command Descriptor Block: \n"); +	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb)); + +	ipr_err("Additional IOA Data:\n"); +	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error)); +} + +/**   * ipr_get_error - Find the specfied IOASC in the ipr_error_table.   
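Backing up to the ibmvscsi hunks: the new rmb()/mb() calls are a producer/consumer barrier pair around the CRQ. The consumer must observe the valid bit before reading any other field of the entry, and the producer must make the command buffer globally visible before the H_SEND_CRQ hypercall hands it to the VIOS. The consumer half, reduced to its essentials (queue locking omitted):

static struct viosrp_crq *demo_next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq = &queue->msgs[queue->cur];

	if (!(crq->valid & 0x80))
		return NULL;			/* nothing new from the VIOS */
	if (++queue->cur == queue->size)
		queue->cur = 0;
	rmb();	/* valid-bit read must precede the payload reads */
	return crq;
}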
* @ioasc:	IOASC   * @@ -2467,6 +2504,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,  	case IPR_HOST_RCB_OVERLAY_ID_20:  		ipr_log_fabric_error(ioa_cfg, hostrcb);  		break; +	case IPR_HOST_RCB_OVERLAY_ID_21: +		ipr_log_sis64_device_error(ioa_cfg, hostrcb); +		break;  	case IPR_HOST_RCB_OVERLAY_ID_23:  		ipr_log_sis64_config_error(ioa_cfg, hostrcb);  		break; @@ -3630,16 +3670,14 @@ static ssize_t ipr_store_iopoll_weight(struct device *dev,  		return strlen(buf);  	} -	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && -			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { +	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {  		for (i = 1; i < ioa_cfg->hrrq_num; i++)  			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);  	}  	spin_lock_irqsave(shost->host_lock, lock_flags);  	ioa_cfg->iopoll_weight = user_iopoll_weight; -	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && -			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { +	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {  		for (i = 1; i < ioa_cfg->hrrq_num; i++) {  			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,  					ioa_cfg->iopoll_weight, ipr_iopoll); @@ -5015,6 +5053,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)  	} else  		rc = ipr_device_reset(ioa_cfg, res);  	res->resetting_device = 0; +	res->reset_occurred = 1;  	LEAVE;  	return rc ? FAILED : SUCCESS; @@ -5484,8 +5523,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)  		return IRQ_NONE;  	} -	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && -			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { +	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {  		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==  		       hrrq->toggle_bit) {  			if (!blk_iopoll_sched_prep(&hrrq->iopoll)) @@ -6183,8 +6221,10 @@ static int ipr_queuecommand(struct Scsi_Host *shost,  			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;  		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC; -		if (ipr_is_gscsi(res)) +		if (ipr_is_gscsi(res) && res->reset_occurred) { +			res->reset_occurred = 0;  			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST; +		}  		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;  		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);  	} @@ -6305,7 +6345,8 @@ static struct scsi_host_template driver_template = {  	.use_clustering = ENABLE_CLUSTERING,  	.shost_attrs = ipr_ioa_attrs,  	.sdev_attrs = ipr_dev_attrs, -	.proc_name = IPR_NAME +	.proc_name = IPR_NAME, +	.no_write_same = 1,  };  /** @@ -8640,6 +8681,25 @@ static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)  }  /** + * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled + * @pdev:	PCI device struct + * + * Description: This routine is called to tell us that the MMIO + * access to the IOA has been restored + */ +static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev) +{ +	unsigned long flags = 0; +	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev); + +	spin_lock_irqsave(ioa_cfg->host->host_lock, flags); +	if (!ioa_cfg->probe_done) +		pci_save_state(pdev); +	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); +	return PCI_ERS_RESULT_NEED_RESET; +} + +/**   * ipr_pci_frozen - Called when slot has experienced a PCI bus error.   
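ipr_pci_mmio_enabled() above slots into the EEH recovery sequence: with frozen slots now answering PCI_ERS_RESULT_CAN_RECOVER, the PCI core re-enables MMIO and calls mmio_enabled, which saves config space if probe has not finished and then asks for a reset. A sketch of the callback table it presumably plugs into via the driver's err_handler field:

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= ipr_pci_error_detected,	/* CAN_RECOVER when frozen */
	.mmio_enabled	= ipr_pci_mmio_enabled,		/* save state, request reset */
	.slot_reset	= ipr_pci_slot_reset,
	/* the real table likely also wires up a .resume handler */
};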
* @pdev:	PCI device struct   * @@ -8653,7 +8713,8 @@ static void ipr_pci_frozen(struct pci_dev *pdev)  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags); -	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE); +	if (ioa_cfg->probe_done) +		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);  } @@ -8671,11 +8732,14 @@ static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags); -	if (ioa_cfg->needs_warm_reset) -		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); -	else -		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, -					IPR_SHUTDOWN_NONE); +	if (ioa_cfg->probe_done) { +		if (ioa_cfg->needs_warm_reset) +			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); +		else +			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space, +						IPR_SHUTDOWN_NONE); +	} else +		wake_up_all(&ioa_cfg->eeh_wait_q);  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);  	return PCI_ERS_RESULT_RECOVERED;  } @@ -8694,17 +8758,20 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)  	int i;  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags); -	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) -		ioa_cfg->sdt_state = ABORT_DUMP; -	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; -	ioa_cfg->in_ioa_bringdown = 1; -	for (i = 0; i < ioa_cfg->hrrq_num; i++) { -		spin_lock(&ioa_cfg->hrrq[i]._lock); -		ioa_cfg->hrrq[i].allow_cmds = 0; -		spin_unlock(&ioa_cfg->hrrq[i]._lock); -	} -	wmb(); -	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); +	if (ioa_cfg->probe_done) { +		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) +			ioa_cfg->sdt_state = ABORT_DUMP; +		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; +		ioa_cfg->in_ioa_bringdown = 1; +		for (i = 0; i < ioa_cfg->hrrq_num; i++) { +			spin_lock(&ioa_cfg->hrrq[i]._lock); +			ioa_cfg->hrrq[i].allow_cmds = 0; +			spin_unlock(&ioa_cfg->hrrq[i]._lock); +		} +		wmb(); +		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); +	} else +		wake_up_all(&ioa_cfg->eeh_wait_q);  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);  } @@ -8724,7 +8791,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,  	switch (state) {  	case pci_channel_io_frozen:  		ipr_pci_frozen(pdev); -		return PCI_ERS_RESULT_NEED_RESET; +		return PCI_ERS_RESULT_CAN_RECOVER;  	case pci_channel_io_perm_failure:  		ipr_pci_perm_failure(pdev);  		return PCI_ERS_RESULT_DISCONNECT; @@ -8754,6 +8821,7 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)  	ENTER;  	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);  	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg); +	ioa_cfg->probe_done = 1;  	if (ioa_cfg->needs_hard_reset) {  		ioa_cfg->needs_hard_reset = 0;  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); @@ -9029,16 +9097,6 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)  	if (!ioa_cfg->vpd_cbs)  		goto out_free_res_entries; -	for (i = 0; i < ioa_cfg->hrrq_num; i++) { -		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); -		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); -		spin_lock_init(&ioa_cfg->hrrq[i]._lock); -		if (i == 0) -			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; -		else -			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock; -	} -  	if (ipr_alloc_cmd_blks(ioa_cfg))  		goto out_free_vpd_cbs; @@ -9139,6 +9197,48 @@ static void 
ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)  }  /** + * ipr_init_regs - Initialize IOA registers + * @ioa_cfg:	ioa config struct + * + * Return value: + *	none + **/ +static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg) +{ +	const struct ipr_interrupt_offsets *p; +	struct ipr_interrupts *t; +	void __iomem *base; + +	p = &ioa_cfg->chip_cfg->regs; +	t = &ioa_cfg->regs; +	base = ioa_cfg->hdw_dma_regs; + +	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; +	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; +	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; +	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; +	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; +	t->clr_interrupt_reg = base + p->clr_interrupt_reg; +	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; +	t->sense_interrupt_reg = base + p->sense_interrupt_reg; +	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; +	t->ioarrin_reg = base + p->ioarrin_reg; +	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; +	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; +	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; +	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; +	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; +	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; + +	if (ioa_cfg->sis64) { +		t->init_feedback_reg = base + p->init_feedback_reg; +		t->dump_addr_reg = base + p->dump_addr_reg; +		t->dump_data_reg = base + p->dump_data_reg; +		t->endian_swap_reg = base + p->endian_swap_reg; +	} +} + +/**   * ipr_init_ioa_cfg - Initialize IOA config struct   * @ioa_cfg:	ioa config struct   * @host:		scsi host struct @@ -9150,9 +9250,7 @@ static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)  static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,  			     struct Scsi_Host *host, struct pci_dev *pdev)  { -	const struct ipr_interrupt_offsets *p; -	struct ipr_interrupts *t; -	void __iomem *base; +	int i;  	ioa_cfg->host = host;  	ioa_cfg->pdev = pdev; @@ -9172,6 +9270,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,  	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);  	init_waitqueue_head(&ioa_cfg->reset_wait_q);  	init_waitqueue_head(&ioa_cfg->msi_wait_q); +	init_waitqueue_head(&ioa_cfg->eeh_wait_q);  	ioa_cfg->sdt_state = INACTIVE;  	ipr_initialize_bus_attr(ioa_cfg); @@ -9182,44 +9281,33 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,  		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;  		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)  			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS; +		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) +					   + ((sizeof(struct ipr_config_table_entry64) +					       * ioa_cfg->max_devs_supported)));  	} else {  		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;  		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;  		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)  			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS; +		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) +					   + ((sizeof(struct ipr_config_table_entry) +					       * ioa_cfg->max_devs_supported)));  	} +  	host->max_channel = IPR_MAX_BUS_TO_SCAN;  	host->unique_id = host->host_no;  	host->max_cmd_len = IPR_MAX_CDB_LEN;  	host->can_queue = ioa_cfg->max_cmds;  	pci_set_drvdata(pdev, ioa_cfg); -	p = &ioa_cfg->chip_cfg->regs; -	t = &ioa_cfg->regs; -	base = ioa_cfg->hdw_dma_regs; - -	
t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg; -	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg; -	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32; -	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg; -	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32; -	t->clr_interrupt_reg = base + p->clr_interrupt_reg; -	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32; -	t->sense_interrupt_reg = base + p->sense_interrupt_reg; -	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32; -	t->ioarrin_reg = base + p->ioarrin_reg; -	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg; -	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32; -	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg; -	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32; -	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg; -	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32; - -	if (ioa_cfg->sis64) { -		t->init_feedback_reg = base + p->init_feedback_reg; -		t->dump_addr_reg = base + p->dump_addr_reg; -		t->dump_data_reg = base + p->dump_data_reg; -		t->endian_swap_reg = base + p->endian_swap_reg; +	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) { +		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q); +		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q); +		spin_lock_init(&ioa_cfg->hrrq[i]._lock); +		if (i == 0) +			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock; +		else +			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;  	}  } @@ -9242,54 +9330,63 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)  	return NULL;  } +/** + * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete + *						during probe time + * @ioa_cfg:	ioa config struct + * + * Return value: + * 	None + **/ +static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg) +{ +	struct pci_dev *pdev = ioa_cfg->pdev; + +	if (pci_channel_offline(pdev)) { +		wait_event_timeout(ioa_cfg->eeh_wait_q, +				   !pci_channel_offline(pdev), +				   IPR_PCI_ERROR_RECOVERY_TIMEOUT); +		pci_restore_state(pdev); +	} +} +  static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)  {  	struct msix_entry entries[IPR_MAX_MSIX_VECTORS]; -	int i, err, vectors; +	int i, vectors;  	for (i = 0; i < ARRAY_SIZE(entries); ++i)  		entries[i].entry = i; -	vectors = ipr_number_of_msix; - -	while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0) -			vectors = err; - -	if (err < 0) { -		pci_disable_msix(ioa_cfg->pdev); -		return err; +	vectors = pci_enable_msix_range(ioa_cfg->pdev, +					entries, 1, ipr_number_of_msix); +	if (vectors < 0) { +		ipr_wait_for_pci_err_recovery(ioa_cfg); +		return vectors;  	} -	if (!err) { -		for (i = 0; i < vectors; i++) -			ioa_cfg->vectors_info[i].vec = entries[i].vector; -		ioa_cfg->nvectors = vectors; -	} +	for (i = 0; i < vectors; i++) +		ioa_cfg->vectors_info[i].vec = entries[i].vector; +	ioa_cfg->nvectors = vectors; -	return err; +	return 0;  }  static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)  { -	int i, err, vectors; - -	vectors = ipr_number_of_msix; +	int i, vectors; -	while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0) -			vectors = err; - -	if (err < 0) { -		pci_disable_msi(ioa_cfg->pdev); -		return err; +	vectors = pci_enable_msi_range(ioa_cfg->pdev, 1, ipr_number_of_msix); +	if (vectors < 0) { +		ipr_wait_for_pci_err_recovery(ioa_cfg); +		return vectors;  	} -	if (!err) { -		for (i = 0; i < vectors; i++) -			
ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
-		ioa_cfg->nvectors = vectors;
-	}
+	for (i = 0; i < vectors; i++)
+		ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+	ioa_cfg->nvectors = vectors;
 
-	return err;
+	return 0;
 }
 
 static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
@@ -9354,7 +9451,7 @@ static irqreturn_t ipr_test_intr(int irq, void *devp)
  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
  * @pdev:		PCI device struct
  *
- * Description: The return value from pci_enable_msi() can not always be
+ * Description: The return value from pci_enable_msi_range() can not always be
  * trusted.  This routine sets up and initiates a test interrupt to determine
  * if the interrupt is received via the ipr_test_intr() service routine.
  * If the test fails, the driver will fall back to LSI.
@@ -9433,19 +9530,13 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 	ENTER;
 
-	if ((rc = pci_enable_device(pdev))) {
-		dev_err(&pdev->dev, "Cannot enable adapter\n");
-		goto out;
-	}
-
 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
-
 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
 
 	if (!host) {
 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
 		rc = -ENOMEM;
-		goto out_disable;
+		goto out;
 	}
 
 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
@@ -9475,6 +9566,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 
 	ioa_cfg->revid = pdev->revision;
 
+	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
+
 	ipr_regs_pci = pci_resource_start(pdev, 0);
 
 	rc = pci_request_regions(pdev, IPR_NAME);
@@ -9484,22 +9577,35 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 		goto out_scsi_host_put;
 	}
 
+	rc = pci_enable_device(pdev);
+
+	if (rc || pci_channel_offline(pdev)) {
+		if (pci_channel_offline(pdev)) {
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
+			rc = pci_enable_device(pdev);
+		}
+
+		if (rc) {
+			dev_err(&pdev->dev, "Cannot enable adapter\n");
+			ipr_wait_for_pci_err_recovery(ioa_cfg);
+			goto out_release_regions;
+		}
+	}
+
 	ipr_regs = pci_ioremap_bar(pdev, 0);
 
 	if (!ipr_regs) {
 		dev_err(&pdev->dev,
 			"Couldn't map memory range of registers\n");
 		rc = -ENOMEM;
-		goto out_release_regions;
+		goto out_disable;
 	}
 
 	ioa_cfg->hdw_dma_regs = ipr_regs;
 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
 
-	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
-
-	pci_set_master(pdev);
+	ipr_init_regs(ioa_cfg);
 
 	if (ioa_cfg->sis64) {
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -9507,7 +9613,6 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
 			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		}
-
 	} else
 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 
@@ -9521,10 +9626,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
 		dev_err(&pdev->dev, "Write of cache line size failed\n");
+		ipr_wait_for_pci_err_recovery(ioa_cfg);
 		rc = -EIO;
 		goto cleanup_nomem;
 	}
 
+	/* Issue MMIO read to ensure card is not in EEH */
+	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
+	ipr_wait_for_pci_err_recovery(ioa_cfg);
+
 	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
 		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
 			IPR_MAX_MSIX_VECTORS);
@@ -9543,10 +9653,22 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
 		dev_info(&pdev->dev, "Cannot enable MSI.\n");
 	}
 
+	pci_set_master(pdev);
+
+	if (pci_channel_offline(pdev)) {
+		
ipr_wait_for_pci_err_recovery(ioa_cfg); +		pci_set_master(pdev); +		if (pci_channel_offline(pdev)) { +			rc = -EIO; +			goto out_msi_disable; +		} +	} +  	if (ioa_cfg->intr_flag == IPR_USE_MSI ||  	    ioa_cfg->intr_flag == IPR_USE_MSIX) {  		rc = ipr_test_msi(ioa_cfg, pdev);  		if (rc == -EOPNOTSUPP) { +			ipr_wait_for_pci_err_recovery(ioa_cfg);  			if (ioa_cfg->intr_flag == IPR_USE_MSI) {  				ioa_cfg->intr_flag &= ~IPR_USE_MSI;  				pci_disable_msi(pdev); @@ -9576,30 +9698,12 @@ static int ipr_probe_ioa(struct pci_dev *pdev,  				(unsigned int)num_online_cpus(),  				(unsigned int)IPR_MAX_HRRQ_NUM); -	/* Save away PCI config space for use following IOA reset */ -	rc = pci_save_state(pdev); - -	if (rc != PCIBIOS_SUCCESSFUL) { -		dev_err(&pdev->dev, "Failed to save PCI config space\n"); -		rc = -EIO; -		goto out_msi_disable; -	} -  	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))  		goto out_msi_disable;  	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))  		goto out_msi_disable; -	if (ioa_cfg->sis64) -		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64) -				+ ((sizeof(struct ipr_config_table_entry64) -				* ioa_cfg->max_devs_supported))); -	else -		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr) -				+ ((sizeof(struct ipr_config_table_entry) -				* ioa_cfg->max_devs_supported))); -  	rc = ipr_alloc_mem(ioa_cfg);  	if (rc < 0) {  		dev_err(&pdev->dev, @@ -9607,6 +9711,15 @@ static int ipr_probe_ioa(struct pci_dev *pdev,  		goto out_msi_disable;  	} +	/* Save away PCI config space for use following IOA reset */ +	rc = pci_save_state(pdev); + +	if (rc != PCIBIOS_SUCCESSFUL) { +		dev_err(&pdev->dev, "Failed to save PCI config space\n"); +		rc = -EIO; +		goto cleanup_nolog; +	} +  	/*  	 * If HRRQ updated interrupt is not masked, or reset alert is set,  	 * the card is in an unknown state and needs a hard reset @@ -9663,18 +9776,19 @@ out:  cleanup_nolog:  	ipr_free_mem(ioa_cfg);  out_msi_disable: +	ipr_wait_for_pci_err_recovery(ioa_cfg);  	if (ioa_cfg->intr_flag == IPR_USE_MSI)  		pci_disable_msi(pdev);  	else if (ioa_cfg->intr_flag == IPR_USE_MSIX)  		pci_disable_msix(pdev);  cleanup_nomem:  	iounmap(ipr_regs); +out_disable: +	pci_disable_device(pdev);  out_release_regions:  	pci_release_regions(pdev);  out_scsi_host_put:  	scsi_host_put(host); -out_disable: -	pci_disable_device(pdev);  	goto out;  } @@ -9858,8 +9972,7 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)  	ioa_cfg->host->max_channel = IPR_VSET_BUS;  	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight; -	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && -			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { +	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {  		for (i = 1; i < ioa_cfg->hrrq_num; i++) {  			blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,  					ioa_cfg->iopoll_weight, ipr_iopoll); @@ -9888,8 +10001,7 @@ static void ipr_shutdown(struct pci_dev *pdev)  	int i;  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); -	if (blk_iopoll_enabled && ioa_cfg->iopoll_weight && -			ioa_cfg->sis64 && ioa_cfg->nvectors > 1) { +	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {  		ioa_cfg->iopoll_weight = 0;  		for (i = 1; i < ioa_cfg->hrrq_num; i++)  			blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll); @@ -9993,6 +10105,8 @@ static struct pci_device_id ipr_pci_table[] = {  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, +		
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 }, +	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 }, @@ -10004,12 +10118,19 @@ static struct pci_device_id ipr_pci_table[] = {  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 }, +	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, +		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 }, +	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, +		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 }, +	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, +		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },  	{ }  };  MODULE_DEVICE_TABLE(pci, ipr_pci_table);  static const struct pci_error_handlers ipr_err_handler = {  	.error_detected = ipr_pci_error_detected, +	.mmio_enabled = ipr_pci_mmio_enabled,  	.slot_reset = ipr_pci_slot_reset,  }; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index cad1483f05d..31ed126f714 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -101,12 +101,16 @@  #define IPR_SUBS_DEV_ID_57D7    0x03FF  #define IPR_SUBS_DEV_ID_57D8    0x03FE  #define IPR_SUBS_DEV_ID_57D9    0x046D +#define IPR_SUBS_DEV_ID_57DA    0x04CA  #define IPR_SUBS_DEV_ID_57EB    0x0474  #define IPR_SUBS_DEV_ID_57EC    0x0475  #define IPR_SUBS_DEV_ID_57ED    0x0499  #define IPR_SUBS_DEV_ID_57EE    0x049A  #define IPR_SUBS_DEV_ID_57EF    0x049B  #define IPR_SUBS_DEV_ID_57F0    0x049C +#define IPR_SUBS_DEV_ID_2CCA	0x04C7 +#define IPR_SUBS_DEV_ID_2CD2	0x04C8 +#define IPR_SUBS_DEV_ID_2CCD	0x04C9  #define IPR_NAME				"ipr"  /* @@ -230,6 +234,7 @@  #define IPR_WAIT_FOR_RESET_TIMEOUT		(2 * HZ)  #define IPR_CHECK_FOR_RESET_TIMEOUT		(HZ / 10)  #define IPR_WAIT_FOR_BIST_TIMEOUT		(2 * HZ) +#define IPR_PCI_ERROR_RECOVERY_TIMEOUT	(120 * HZ)  #define IPR_PCI_RESET_TIMEOUT			(HZ / 2)  #define IPR_SIS32_DUMP_TIMEOUT			(15 * HZ)  #define IPR_SIS64_DUMP_TIMEOUT			(40 * HZ) @@ -301,7 +306,7 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)   * Dump literals   */  #define IPR_FMT2_MAX_IOA_DUMP_SIZE			(4 * 1024 * 1024) -#define IPR_FMT3_MAX_IOA_DUMP_SIZE			(32 * 1024 * 1024) +#define IPR_FMT3_MAX_IOA_DUMP_SIZE			(80 * 1024 * 1024)  #define IPR_FMT2_NUM_SDT_ENTRIES			511  #define IPR_FMT3_NUM_SDT_ENTRIES			0xFFF  #define IPR_FMT2_MAX_NUM_DUMP_PAGES	((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1) @@ -311,7 +316,7 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)   * Misc literals   */  #define IPR_NUM_IOADL_ENTRIES			IPR_MAX_SGLIST -#define IPR_MAX_MSIX_VECTORS		0x5 +#define IPR_MAX_MSIX_VECTORS		0x10  #define IPR_MAX_HRRQ_NUM		0x10  #define IPR_INIT_HRRQ			0x0 @@ -897,6 +902,18 @@ struct ipr_hostrcb_type_01_error {  	__be32 ioa_data[236];  }__attribute__((packed, aligned (4))); +struct ipr_hostrcb_type_21_error { +	__be32 wwn[4]; +	u8 res_path[8]; +	u8 primary_problem_desc[32]; +	u8 second_problem_desc[32]; +	__be32 sense_data[8]; +	__be32 cdb[4]; +	__be32 residual_trans_length; +	__be32 length_of_error; +	__be32 ioa_data[236]; +}__attribute__((packed, aligned (4))); +  struct ipr_hostrcb_type_02_error {  	struct ipr_vpd ioa_vpd;  	struct ipr_vpd cfc_vpd; @@ -1126,6 +1143,7 @@ struct ipr_hostrcb64_error {  		struct ipr_hostrcb_type_ff_error type_ff_error;  		struct ipr_hostrcb_type_12_error type_12_error;  		struct ipr_hostrcb_type_17_error type_17_error; +		
struct ipr_hostrcb_type_21_error type_21_error;  		struct ipr_hostrcb_type_23_error type_23_error;  		struct ipr_hostrcb_type_24_error type_24_error;  		struct ipr_hostrcb_type_30_error type_30_error; @@ -1169,6 +1187,7 @@ struct ipr_hcam {  #define IPR_HOST_RCB_OVERLAY_ID_16				0x16  #define IPR_HOST_RCB_OVERLAY_ID_17				0x17  #define IPR_HOST_RCB_OVERLAY_ID_20				0x20 +#define IPR_HOST_RCB_OVERLAY_ID_21				0x21  #define IPR_HOST_RCB_OVERLAY_ID_23				0x23  #define IPR_HOST_RCB_OVERLAY_ID_24				0x24  #define IPR_HOST_RCB_OVERLAY_ID_26				0x26 @@ -1252,6 +1271,7 @@ struct ipr_resource_entry {  	u8 add_to_ml:1;  	u8 del_from_ml:1;  	u8 resetting_device:1; +	u8 reset_occurred:1;  	u32 bus;		/* AKA channel */  	u32 target;		/* AKA id */ @@ -1441,6 +1461,7 @@ struct ipr_ioa_cfg {  	u8 dump_timeout:1;  	u8 cfg_locked:1;  	u8 clear_isr:1; +	u8 probe_done:1;  	u8 revid; @@ -1519,6 +1540,7 @@ struct ipr_ioa_cfg {  	wait_queue_head_t reset_wait_q;  	wait_queue_head_t msi_wait_q; +	wait_queue_head_t eeh_wait_q;  	struct ipr_dump *dump;  	enum ipr_sdt_state sdt_state; diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index 8d5ea8a1e5a..52a216f21ae 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -374,6 +374,7 @@ static struct scsi_host_template ips_driver_template = {  	.sg_tablesize		= IPS_MAX_SG,  	.cmd_per_lun		= 3,  	.use_clustering		= ENABLE_CLUSTERING, +	.no_write_same		= 1,  }; diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 4911310a38f..22a9bb1abae 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)  }  #define for_each_isci_host(id, ihost, pdev) \ -	for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ -	     id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ -	     ihost = to_pci_info(pdev)->hosts[++id]) +	for (id = 0; id < SCI_MAX_CONTROLLERS && \ +	     (ihost = to_pci_info(pdev)->hosts[id]); id++)  static inline void wait_for_start(struct isci_host *ihost)  { diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index d25d0d859f0..695b34e9154 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -66,7 +66,7 @@  #include "probe_roms.h"  #define MAJ 1 -#define MIN 1 +#define MIN 2  #define BUILD 0  #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." 
\  	__stringify(BUILD) diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 85c77f6b802..ac879745ef8 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,  					  SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);  	} else {  		/* the phy is already the part of the port */ -		u32 port_state = iport->sm.current_state_id; - -		/* if the PORT'S state is resetting then the link up is from -		 * port hard reset in this case, we need to tell the port -		 * that link up is recieved -		 */ -		BUG_ON(port_state != SCI_PORT_RESETTING);  		port_agent->phy_ready_mask |= 1 << phy_index;  		sci_port_link_up(iport, iphy);  	} diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 96a26f45467..cc51f38b116 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1541,7 +1541,7 @@ void isci_remote_device_release(struct kref *kref)  	clear_bit(IDEV_STOP_PENDING, &idev->flags);  	clear_bit(IDEV_IO_READY, &idev->flags);  	clear_bit(IDEV_GONE, &idev->flags); -	smp_mb__before_clear_bit(); +	smp_mb__before_atomic();  	clear_bit(IDEV_ALLOCATED, &idev->flags);  	wake_up(&ihost->eventq);  } diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 99d2930b18c..56e38096f0c 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -2723,13 +2723,9 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_  	memcpy(resp->ending_fis, fis, sizeof(*fis));  	ts->buf_valid_size = sizeof(*resp); -	/* If the device fault bit is set in the status register, then -	 * set the sense data and return. -	 */ -	if (fis->status & ATA_DF) +	/* If an error is flagged let libata decode the fis */ +	if (ac_err_mask(fis->status))  		ts->stat = SAS_PROTO_RESPONSE; -	else if (fis->status & ATA_ERR) -		ts->stat = SAM_STAT_CHECK_CONDITION;  	else  		ts->stat = SAM_STAT_GOOD; diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 0d30ca849e8..5d6fda72d65 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)  		/* XXX: need to cleanup any ireqs targeting this  		 * domain_device  		 */ -		ret = TMF_RESP_FUNC_COMPLETE; +		ret = -ENODEV;  		goto out;  	} diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c index 14c1c8f6a95..680bf6f0ce7 100644 --- a/drivers/scsi/iscsi_boot_sysfs.c +++ b/drivers/scsi/iscsi_boot_sysfs.c @@ -490,5 +490,6 @@ void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset)  		iscsi_boot_remove_kobj(boot_kobj);  	kset_unregister(boot_kset->kset); +	kfree(boot_kset);  }  EXPORT_SYMBOL_GPL(iscsi_boot_destroy_kset); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 9e2588a6881..a669f2d11c3 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -116,6 +116,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)  	struct iscsi_conn *conn = sk->sk_user_data;  	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) && +	    (conn->session->state != ISCSI_STATE_LOGGING_OUT) &&  	    !atomic_read(&sk->sk_rmem_alloc)) {  		ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");  		iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE); @@ -124,7 +125,7 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)  	return 0;  } -static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag) 
+static void iscsi_sw_tcp_data_ready(struct sock *sk)  {  	struct iscsi_conn *conn;  	struct iscsi_tcp_conn *tcp_conn; @@ -243,7 +244,7 @@ iscsi_sw_tcp_conn_restore_callbacks(struct iscsi_conn *conn)  	sk->sk_data_ready   = tcp_sw_conn->old_data_ready;  	sk->sk_state_change = tcp_sw_conn->old_state_change;  	sk->sk_write_space  = tcp_sw_conn->old_write_space; -	sk->sk_no_check	 = 0; +	sk->sk_no_check_tx = 0;  	write_unlock_bh(&sk->sk_callback_lock);  } @@ -592,9 +593,9 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)  	iscsi_sw_tcp_conn_restore_callbacks(conn);  	sock_put(sock->sk); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	tcp_sw_conn->sock = NULL; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	sockfd_put(sock);  } @@ -662,10 +663,10 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,  	if (err)  		goto free_socket; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	/* bind iSCSI connection and socket */  	tcp_sw_conn->sock = sock; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	/* setup Socket parameters */  	sk = sock->sk; @@ -725,14 +726,14 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,  	switch(param) {  	case ISCSI_PARAM_CONN_PORT:  	case ISCSI_PARAM_CONN_ADDRESS: -		spin_lock_bh(&conn->session->lock); +		spin_lock_bh(&conn->session->frwd_lock);  		if (!tcp_sw_conn || !tcp_sw_conn->sock) { -			spin_unlock_bh(&conn->session->lock); +			spin_unlock_bh(&conn->session->frwd_lock);  			return -ENOTCONN;  		}  		rc = kernel_getpeername(tcp_sw_conn->sock,  					(struct sockaddr *)&addr, &len); -		spin_unlock_bh(&conn->session->lock); +		spin_unlock_bh(&conn->session->frwd_lock);  		if (rc)  			return rc; @@ -758,23 +759,26 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,  	switch (param) {  	case ISCSI_HOST_PARAM_IPADDRESS: -		spin_lock_bh(&session->lock); +		if (!session) +			return -ENOTCONN; + +		spin_lock_bh(&session->frwd_lock);  		conn = session->leadconn;  		if (!conn) { -			spin_unlock_bh(&session->lock); +			spin_unlock_bh(&session->frwd_lock);  			return -ENOTCONN;  		}  		tcp_conn = conn->dd_data;  		tcp_sw_conn = tcp_conn->dd_data;  		if (!tcp_sw_conn->sock) { -			spin_unlock_bh(&session->lock); +			spin_unlock_bh(&session->frwd_lock);  			return -ENOTCONN;  		}  		rc = kernel_getsockname(tcp_sw_conn->sock,  					(struct sockaddr *)&addr, &len); -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		if (rc)  			return rc; diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h index 666fe09378f..f42ecb238af 100644 --- a/drivers/scsi/iscsi_tcp.h +++ b/drivers/scsi/iscsi_tcp.h @@ -40,7 +40,7 @@ struct iscsi_sw_tcp_conn {  	struct iscsi_sw_tcp_send out;  	/* old values for socket callbacks */ -	void			(*old_data_ready)(struct sock *, int); +	void			(*old_data_ready)(struct sock *);  	void			(*old_state_change)(struct sock *);  	void			(*old_write_space)(struct sock *); diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 587992952b3..1b3a0947345 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -27,6 +27,7 @@  #include <linux/slab.h>  #include <linux/err.h>  #include <linux/export.h> +#include <linux/log2.h>  #include <scsi/fc/fc_fc2.h> @@ -303,10 +304,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,  		fr_eof(fp) = FC_EOF_N;  	} -	/* -	 * Initialize remainig fh fields -	 * from 
fc_fill_fc_hdr -	 */ +	/* Initialize remaining fh fields from fc_fill_fc_hdr */  	fh->fh_ox_id = htons(ep->oxid);  	fh->fh_rx_id = htons(ep->rxid);  	fh->fh_seq_id = ep->seq.id; @@ -362,9 +360,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,  	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec); -	if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, -			       msecs_to_jiffies(timer_msec))) -		fc_exch_hold(ep);		/* hold for timer */ +	fc_exch_hold(ep);		/* hold for timer */ +	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work, +				msecs_to_jiffies(timer_msec))) +		fc_exch_release(ep);  }  /** @@ -382,6 +381,8 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)  /**   * fc_exch_done_locked() - Complete an exchange with the exchange lock held   * @ep: The exchange that is complete + * + * Note: May sleep if invoked from outside a response handler.   */  static int fc_exch_done_locked(struct fc_exch *ep)  { @@ -393,7 +394,6 @@ static int fc_exch_done_locked(struct fc_exch *ep)  	 * ep, and in that case we only clear the resp and set it as  	 * complete, so it can be reused by the timer to send the rrq.  	 */ -	ep->resp = NULL;  	if (ep->state & FC_EX_DONE)  		return rc;  	ep->esb_stat |= ESB_ST_COMPLETE; @@ -464,15 +464,21 @@ static void fc_exch_delete(struct fc_exch *ep)  }  static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp, -		       struct fc_frame *fp) +			      struct fc_frame *fp)  {  	struct fc_exch *ep;  	struct fc_frame_header *fh = fc_frame_header_get(fp); -	int error; +	int error = -ENXIO;  	u32 f_ctl;  	u8 fh_type = fh->fh_type;  	ep = fc_seq_exch(sp); + +	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) { +		fc_frame_free(fp); +		goto out; +	} +  	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));  	f_ctl = ntoh24(fh->fh_f_ctl); @@ -515,6 +521,9 @@ out:   * @lport: The local port that the exchange will be sent on   * @sp:	   The sequence to be sent   * @fp:	   The frame to be sent on the exchange + * + * Note: The frame will be freed either by a direct call to fc_frame_free(fp) + * or indirectly by calling libfc_function_template.frame_send().   */  static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,  		       struct fc_frame *fp) @@ -581,6 +590,8 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)  /*   * Set the response handler for the exchange associated with a sequence. + * + * Note: May sleep if invoked from outside a response handler.   */  static void fc_seq_set_resp(struct fc_seq *sp,  			    void (*resp)(struct fc_seq *, struct fc_frame *, @@ -588,8 +599,18 @@ static void fc_seq_set_resp(struct fc_seq *sp,  			    void *arg)  {  	struct fc_exch *ep = fc_seq_exch(sp); +	DEFINE_WAIT(wait);  	spin_lock_bh(&ep->ex_lock); +	while (ep->resp_active && ep->resp_task != current) { +		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE); +		spin_unlock_bh(&ep->ex_lock); + +		schedule(); + +		spin_lock_bh(&ep->ex_lock); +	} +	finish_wait(&ep->resp_wq, &wait);  	ep->resp = resp;  	ep->arg = arg;  	spin_unlock_bh(&ep->ex_lock); @@ -622,27 +643,31 @@ static int fc_exch_abort_locked(struct fc_exch *ep,  	if (!sp)  		return -ENOMEM; -	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;  	if (timer_msec)  		fc_exch_timer_set_locked(ep, timer_msec); -	/* -	 * If not logged into the fabric, don't send ABTS but leave -	 * sequence active until next timeout. -	 */ -	if (!ep->sid) -		return 0; - -	/* -	 * Send an abort for the sequence that timed out. 
-	 */
-	fp = fc_frame_alloc(ep->lp, 0);
-	if (fp) {
-		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
-			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-		error = fc_seq_send_locked(ep->lp, sp, fp);
-	} else
-		error = -ENOBUFS;
+	if (ep->sid) {
+		/*
+		 * Send an abort for the sequence that timed out.
+		 */
+		fp = fc_frame_alloc(ep->lp, 0);
+		if (fp) {
+			ep->esb_stat |= ESB_ST_SEQ_INIT;
+			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+				       FC_TYPE_BLS, FC_FC_END_SEQ |
+				       FC_FC_SEQ_INIT, 0);
+			error = fc_seq_send_locked(ep->lp, sp, fp);
+		} else {
+			error = -ENOBUFS;
+		}
+	} else {
+		/*
+		 * If not logged into the fabric, don't send ABTS but leave
+		 * sequence active until next timeout.
+		 */
+		error = 0;
+	}
+	ep->esb_stat |= ESB_ST_ABNORMAL;
 
 	return error;
 }
@@ -669,6 +694,61 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
 }
 
 /**
+ * fc_invoke_resp() - invoke ep->resp()
+ *
+ * Notes:
+ * It is assumed that after initialization has finished (this means the
+ * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
+ * modified only via fc_seq_set_resp(). This guarantees that neither of
+ * these two variables changes if ep->resp_active > 0.
+ *
+ * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
+ * this function is invoked, the first spin_lock_bh() call in this function
+ * will wait until fc_seq_set_resp() has finished modifying these variables.
+ *
+ * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that
+ * ep->resp() won't be invoked after fc_exch_done() has returned.
+ *
+ * The response handler itself may invoke fc_exch_done(), which will clear the
+ * ep->resp pointer.
+ *
+ * Return value:
+ * Returns true if and only if ep->resp has been invoked.
+ */
+static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+			   struct fc_frame *fp)
+{
+	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+	void *arg;
+	bool res = false;
+
+	spin_lock_bh(&ep->ex_lock);
+	ep->resp_active++;
+	if (ep->resp_task != current)
+		ep->resp_task = !ep->resp_task ? 
current : NULL; +	resp = ep->resp; +	arg = ep->arg; +	spin_unlock_bh(&ep->ex_lock); + +	if (resp) { +		resp(sp, fp, arg); +		res = true; +	} else if (!IS_ERR(fp)) { +		fc_frame_free(fp); +	} + +	spin_lock_bh(&ep->ex_lock); +	if (--ep->resp_active == 0) +		ep->resp_task = NULL; +	spin_unlock_bh(&ep->ex_lock); + +	if (ep->resp_active == 0) +		wake_up(&ep->resp_wq); + +	return res; +} + +/**   * fc_exch_timeout() - Handle exchange timer expiration   * @work: The work_struct identifying the exchange that timed out   */ @@ -677,8 +757,6 @@ static void fc_exch_timeout(struct work_struct *work)  	struct fc_exch *ep = container_of(work, struct fc_exch,  					  timeout_work.work);  	struct fc_seq *sp = &ep->seq; -	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); -	void *arg;  	u32 e_stat;  	int rc = 1; @@ -696,16 +774,13 @@ static void fc_exch_timeout(struct work_struct *work)  			fc_exch_rrq(ep);  		goto done;  	} else { -		resp = ep->resp; -		arg = ep->arg; -		ep->resp = NULL;  		if (e_stat & ESB_ST_ABNORMAL)  			rc = fc_exch_done_locked(ep);  		spin_unlock_bh(&ep->ex_lock);  		if (!rc)  			fc_exch_delete(ep); -		if (resp) -			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg); +		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT)); +		fc_seq_set_resp(sp, NULL, ep->arg);  		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);  		goto done;  	} @@ -792,6 +867,8 @@ hit:  	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */  	ep->rxid = FC_XID_UNKNOWN;  	ep->class = mp->class; +	ep->resp_active = 0; +	init_waitqueue_head(&ep->resp_wq);  	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);  out:  	return ep; @@ -838,8 +915,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)  		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);  		spin_lock_bh(&pool->lock);  		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order); -		if (ep && ep->xid == xid) +		if (ep) { +			WARN_ON(ep->xid != xid);  			fc_exch_hold(ep); +		}  		spin_unlock_bh(&pool->lock);  	}  	return ep; @@ -850,6 +929,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)   * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and   *		    the memory allocated for the related objects may be freed.   * @sp: The sequence that has completed + * + * Note: May sleep if invoked from outside a response handler.   */  static void fc_exch_done(struct fc_seq *sp)  { @@ -859,6 +940,8 @@ static void fc_exch_done(struct fc_seq *sp)  	spin_lock_bh(&ep->ex_lock);  	rc = fc_exch_done_locked(ep);  	spin_unlock_bh(&ep->ex_lock); + +	fc_seq_set_resp(sp, NULL, ep->arg);  	if (!rc)  		fc_exch_delete(ep);  } @@ -987,6 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,  		}  	} +	spin_lock_bh(&ep->ex_lock);  	/*  	 * At this point, we have the exchange held.  	 * Find or create the sequence. @@ -1014,11 +1098,11 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,  				 * sending RSP, hence write request on other  				 * end never finishes.  				 
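				 * ex_lock is already held at this point;
				 * this hunk relies on the lock now being
				 * taken before the sequence lookup above.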
*/ -				spin_lock_bh(&ep->ex_lock);  				sp->ssb_stat |= SSB_ST_RESP;  				sp->id = fh->fh_seq_id; -				spin_unlock_bh(&ep->ex_lock);  			} else { +				spin_unlock_bh(&ep->ex_lock); +  				/* sequence/exch should exist */  				reject = FC_RJT_SEQ_ID;  				goto rel; @@ -1029,6 +1113,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,  	if (f_ctl & FC_FC_SEQ_INIT)  		ep->esb_stat |= ESB_ST_SEQ_INIT; +	spin_unlock_bh(&ep->ex_lock);  	fr_seq(fp) = sp;  out: @@ -1291,21 +1376,23 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)  	if (!ep)  		goto reject; + +	fp = fc_frame_alloc(ep->lp, sizeof(*ap)); +	if (!fp) +		goto free; +  	spin_lock_bh(&ep->ex_lock);  	if (ep->esb_stat & ESB_ST_COMPLETE) {  		spin_unlock_bh(&ep->ex_lock); + +		fc_frame_free(fp);  		goto reject;  	} -	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) +	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) { +		ep->esb_stat |= ESB_ST_REC_QUAL;  		fc_exch_hold(ep);		/* hold for REC_QUAL */ -	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL; -	fc_exch_timer_set_locked(ep, ep->r_a_tov); - -	fp = fc_frame_alloc(ep->lp, sizeof(*ap)); -	if (!fp) { -		spin_unlock_bh(&ep->ex_lock); -		goto free;  	} +	fc_exch_timer_set_locked(ep, ep->r_a_tov);  	fh = fc_frame_header_get(fp);  	ap = fc_frame_payload_get(fp, sizeof(*ap));  	memset(ap, 0, sizeof(*ap)); @@ -1319,14 +1406,16 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)  	}  	sp = fc_seq_start_next_locked(sp);  	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS); +	ep->esb_stat |= ESB_ST_ABNORMAL;  	spin_unlock_bh(&ep->ex_lock); + +free:  	fc_frame_free(rx_fp);  	return;  reject:  	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID); -free: -	fc_frame_free(rx_fp); +	goto free;  }  /** @@ -1416,9 +1505,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,  		 * If new exch resp handler is valid then call that  		 * first.  		 */ -		if (ep->resp) -			ep->resp(sp, fp, ep->arg); -		else +		if (!fc_invoke_resp(ep, sp, fp))  			lport->tt.lport_recv(lport, fp);  		fc_exch_release(ep);	/* release from lookup */  	} else { @@ -1442,8 +1529,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)  	struct fc_exch *ep;  	enum fc_sof sof;  	u32 f_ctl; -	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); -	void *ex_resp_arg;  	int rc;  	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); @@ -1478,19 +1563,19 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)  	f_ctl = ntoh24(fh->fh_f_ctl);  	fr_seq(fp) = sp; + +	spin_lock_bh(&ep->ex_lock);  	if (f_ctl & FC_FC_SEQ_INIT)  		ep->esb_stat |= ESB_ST_SEQ_INIT; +	spin_unlock_bh(&ep->ex_lock);  	if (fc_sof_needs_ack(sof))  		fc_seq_send_ack(sp, fp); -	resp = ep->resp; -	ex_resp_arg = ep->arg;  	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&  	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==  	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {  		spin_lock_bh(&ep->ex_lock); -		resp = ep->resp;  		rc = fc_exch_done_locked(ep);  		WARN_ON(fc_seq_exch(sp) != ep);  		spin_unlock_bh(&ep->ex_lock); @@ -1511,10 +1596,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)  	 * If new exch resp handler is valid then call that  	 * first.  	 
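	 * fc_invoke_resp() samples ep->resp and ep->arg under ex_lock and
	 * keeps resp_active raised while the handler runs, so concurrent
	 * fc_seq_set_resp() callers wait until the handler has returned.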
*/ -	if (resp) -		resp(sp, fp, ex_resp_arg); -	else -		fc_frame_free(fp); +	fc_invoke_resp(ep, sp, fp); +  	fc_exch_release(ep);  	return;  rel: @@ -1553,8 +1636,6 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)   */  static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)  { -	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg); -	void *ex_resp_arg;  	struct fc_frame_header *fh;  	struct fc_ba_acc *ap;  	struct fc_seq *sp; @@ -1599,9 +1680,6 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)  		break;  	} -	resp = ep->resp; -	ex_resp_arg = ep->arg; -  	/* do we need to do some other checks here. Can we reuse more of  	 * fc_exch_recv_seq_resp  	 */ @@ -1613,17 +1691,14 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)  	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)  		rc = fc_exch_done_locked(ep);  	spin_unlock_bh(&ep->ex_lock); + +	fc_exch_hold(ep);  	if (!rc)  		fc_exch_delete(ep); - -	if (resp) -		resp(sp, fp, ex_resp_arg); -	else -		fc_frame_free(fp); - +	fc_invoke_resp(ep, sp, fp);  	if (has_rec)  		fc_exch_timer_set(ep, ep->r_a_tov); - +	fc_exch_release(ep);  }  /** @@ -1662,7 +1737,7 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)  			break;  		default:  			if (ep) -				FC_EXCH_DBG(ep, "BLS rctl %x - %s received", +				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",  					    fh->fh_r_ctl,  					    fc_exch_rctl_name(fh->fh_r_ctl));  			break; @@ -1745,32 +1820,33 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,  /**   * fc_exch_reset() - Reset an exchange   * @ep: The exchange to be reset + * + * Note: May sleep if invoked from outside a response handler.   */  static void fc_exch_reset(struct fc_exch *ep)  {  	struct fc_seq *sp; -	void (*resp)(struct fc_seq *, struct fc_frame *, void *); -	void *arg;  	int rc = 1;  	spin_lock_bh(&ep->ex_lock);  	fc_exch_abort_locked(ep, 0);  	ep->state |= FC_EX_RST_CLEANUP;  	fc_exch_timer_cancel(ep); -	resp = ep->resp; -	ep->resp = NULL;  	if (ep->esb_stat & ESB_ST_REC_QUAL)  		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */  	ep->esb_stat &= ~ESB_ST_REC_QUAL; -	arg = ep->arg;  	sp = &ep->seq;  	rc = fc_exch_done_locked(ep);  	spin_unlock_bh(&ep->ex_lock); + +	fc_exch_hold(ep); +  	if (!rc)  		fc_exch_delete(ep); -	if (resp) -		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg); +	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); +	fc_seq_set_resp(sp, NULL, ep->arg); +	fc_exch_release(ep);  }  /** @@ -1956,13 +2032,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)  	switch (op) {  	case ELS_LS_RJT: -		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ"); +		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");  		/* fall through */  	case ELS_LS_ACC:  		goto cleanup;  	default: -		FC_EXCH_DBG(aborted_ep, "unexpected response op %x " -			    "for RRQ", op); +		FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n", +			    op);  		return;  	} @@ -2533,13 +2609,8 @@ int fc_setup_exch_mgr(void)  	 * cpu on which exchange originated by simple bitwise  	 * AND operation between fc_cpu_mask and exchange id.  	 
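	 * E.g. with nr_cpu_ids == 6, roundup_pow_of_two() yields 8, so
	 * fc_cpu_order == 3 and fc_cpu_mask == 0x7.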
*/ -	fc_cpu_mask = 1; -	fc_cpu_order = 0; -	while (fc_cpu_mask < nr_cpu_ids) { -		fc_cpu_mask <<= 1; -		fc_cpu_order++; -	} -	fc_cpu_mask--; +	fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids)); +	fc_cpu_mask = (1 << fc_cpu_order) - 1;  	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");  	if (!fc_exch_workqueue) diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 5fd0f1fbe58..1d7e76e8b44 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -902,7 +902,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)  	/*  	 * Check for missing or extra data frames.  	 */ -	if (unlikely(fsp->xfer_len != expected_len)) { +	if (unlikely(fsp->cdb_status == SAM_STAT_GOOD && +		     fsp->xfer_len != expected_len)) {  		if (fsp->xfer_len < expected_len) {  			/*  			 * Some data may be queued locally, @@ -955,12 +956,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)  		 * Test for transport underrun, independent of response  		 * underrun status.  		 */ -		if (fsp->xfer_len < fsp->data_len && !fsp->io_status && +		if (fsp->cdb_status == SAM_STAT_GOOD && +		    fsp->xfer_len < fsp->data_len && !fsp->io_status &&  		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) || -		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) { +		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid))  			fsp->status_code = FC_DATA_UNDRUN; -			fsp->io_status = 0; -		}  	}  	seq = fsp->seq_ptr; diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index f04d15c67df..e01a29863c3 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -516,7 +516,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,   * @lport: The local port receiving the LOGO   * @fp:	   The LOGO request frame   * - * Locking Note: The lport lock is exected to be held before calling + * Locking Note: The lport lock is expected to be held before calling   * this function.   */  static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) @@ -1088,7 +1088,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)  {  	unsigned long delay = 0;  	FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n", -		     PTR_ERR(fp), fc_lport_state(lport), +		     IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),  		     lport->retry_count);  	if (PTR_ERR(fp) == -FC_EX_CLOSED) diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index c710d908fda..589ff9aedd3 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -1705,7 +1705,7 @@ reject:   * @rdata: The remote port that sent the PRLI request   * @rx_fp: The PRLI request frame   * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling   * this function.   */  static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, @@ -1824,7 +1824,7 @@ drop:   * @rdata: The remote port that sent the PRLO request   * @rx_fp: The PRLO request frame   * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling   * this function.   
*/  static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, @@ -1895,7 +1895,7 @@ drop:   * @lport: The local port that received the LOGO request   * @fp:	   The LOGO request frame   * - * Locking Note: The rport lock is exected to be held before calling + * Locking Note: The rport lock is expected to be held before calling   * this function.   */  static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index e3995612ea7..3d1bc67bac9 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -110,16 +110,8 @@ static void __iscsi_update_cmdsn(struct iscsi_session *session,  		session->exp_cmdsn = exp_cmdsn;  	if (max_cmdsn != session->max_cmdsn && -	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) { +	    !iscsi_sna_lt(max_cmdsn, session->max_cmdsn))  		session->max_cmdsn = max_cmdsn; -		/* -		 * if the window closed with IO queued, then kick the -		 * xmit thread -		 */ -		if (!list_empty(&session->leadconn->cmdqueue) || -		    !list_empty(&session->leadconn->mgmtqueue)) -			iscsi_conn_queue_work(session->leadconn); -	}  }  void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr) @@ -346,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)  	struct iscsi_session *session = conn->session;  	struct scsi_cmnd *sc = task->sc;  	struct iscsi_scsi_req *hdr; -	unsigned hdrlength, cmd_len; +	unsigned hdrlength, cmd_len, transfer_length;  	itt_t itt;  	int rc; @@ -395,11 +387,15 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)  		if (rc)  			return rc;  	} + +	if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) +		task->protected = true; + +	transfer_length = scsi_transfer_length(sc); +	hdr->data_length = cpu_to_be32(transfer_length);  	if (sc->sc_data_direction == DMA_TO_DEVICE) { -		unsigned out_len = scsi_out(sc)->length;  		struct iscsi_r2t_info *r2t = &task->unsol_r2t; -		hdr->data_length = cpu_to_be32(out_len);  		hdr->flags |= ISCSI_FLAG_CMD_WRITE;  		/*  		 * Write counters: @@ -418,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)  		memset(r2t, 0, sizeof(*r2t));  		if (session->imm_data_en) { -			if (out_len >= session->first_burst) +			if (transfer_length >= session->first_burst)  				task->imm_count = min(session->first_burst,  							conn->max_xmit_dlength);  			else -				task->imm_count = min(out_len, -							conn->max_xmit_dlength); +				task->imm_count = min(transfer_length, +						      conn->max_xmit_dlength);  			hton24(hdr->dlength, task->imm_count);  		} else  			zero_data(hdr->dlength);  		if (!session->initial_r2t_en) { -			r2t->data_length = min(session->first_burst, out_len) - +			r2t->data_length = min(session->first_burst, +					       transfer_length) -  					       task->imm_count;  			r2t->data_offset = task->imm_count;  			r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG); @@ -442,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)  	} else {  		hdr->flags |= ISCSI_FLAG_CMD_FINAL;  		zero_data(hdr->dlength); -		hdr->data_length = cpu_to_be32(scsi_in(sc)->length);  		if (sc->sc_data_direction == DMA_FROM_DEVICE)  			hdr->flags |= ISCSI_FLAG_CMD_READ; @@ -470,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)  			  scsi_bidi_cmnd(sc) ? "bidirectional" :  			  sc->sc_data_direction == DMA_TO_DEVICE ?  			  "write" : "read", conn->id, sc, sc->cmnd[0], -			  task->itt, scsi_bufflen(sc), +			  task->itt, transfer_length,  			  scsi_bidi_cmnd(sc) ? 
scsi_in(sc)->length : 0,  			  session->cmdsn,  			  session->max_cmdsn - session->exp_cmdsn + 1); @@ -481,7 +477,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)   * iscsi_free_task - free a task   * @task: iscsi cmd task   * - * Must be called with session lock. + * Must be called with session back_lock.   * This function returns the scsi command to scsi-ml or cleans   * up mgmt tasks then returns the task to the pool.   */ @@ -535,9 +531,10 @@ void iscsi_put_task(struct iscsi_task *task)  {  	struct iscsi_session *session = task->conn->session; -	spin_lock_bh(&session->lock); +	/* regular RX path uses back_lock */ +	spin_lock_bh(&session->back_lock);  	__iscsi_put_task(task); -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->back_lock);  }  EXPORT_SYMBOL_GPL(iscsi_put_task); @@ -546,7 +543,7 @@ EXPORT_SYMBOL_GPL(iscsi_put_task);   * @task: iscsi cmd task   * @state: state to complete task with   * - * Must be called with session lock. + * Must be called with session back_lock.   */  static void iscsi_complete_task(struct iscsi_task *task, int state)  { @@ -585,7 +582,7 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)   * This is used when drivers do not need or cannot perform   * lower level pdu processing.   * - * Called with session lock + * Called with session back_lock   */  void iscsi_complete_scsi_task(struct iscsi_task *task,  			      uint32_t exp_cmdsn, uint32_t max_cmdsn) @@ -602,7 +599,7 @@ EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);  /* - * session lock must be held and if not called for a task that is + * session back_lock must be held and if not called for a task that is   * still pending or from the xmit thread, then xmit thread must   * be suspended.   */ @@ -642,7 +639,10 @@ static void fail_scsi_task(struct iscsi_task *task, int err)  		scsi_in(sc)->resid = scsi_in(sc)->length;  	} +	/* regular RX path uses back_lock */ +	spin_lock_bh(&conn->session->back_lock);  	iscsi_complete_task(task, state); +	spin_unlock_bh(&conn->session->back_lock);  }  static int iscsi_prep_mgmt_task(struct iscsi_conn *conn, @@ -780,7 +780,10 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,  	return task;  free_task: +	/* regular RX path uses back_lock */ +	spin_lock_bh(&session->back_lock);  	__iscsi_put_task(task); +	spin_unlock_bh(&session->back_lock);  	return NULL;  } @@ -791,10 +794,10 @@ int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,  	struct iscsi_session *session = conn->session;  	int err = 0; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))  		err = -EPERM; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	return err;  }  EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu); @@ -823,6 +826,33 @@ static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,  	sc->result = (DID_OK << 16) | rhdr->cmd_status; +	if (task->protected) { +		sector_t sector; +		u8 ascq; + +		/** +		 * Transports that didn't implement check_protection +		 * callback but still published T10-PI support to scsi-mid +		 * deserve this BUG_ON. 
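+		 * A non-zero ascq returned by check_protection() is turned
+		 * below into ILLEGAL_REQUEST sense data (asc 0x10) with the
+		 * failing sector in the sense information descriptor.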
+		 **/
+		BUG_ON(!session->tt->check_protection);
+
+		ascq = session->tt->check_protection(task, &sector);
+		if (ascq) {
+			sc->result = DRIVER_SENSE << 24 |
+				     SAM_STAT_CHECK_CONDITION;
+			scsi_build_sense_buffer(1, sc->sense_buffer,
+						ILLEGAL_REQUEST, 0x10, ascq);
+			sc->sense_buffer[7] = 0xc; /* Additional sense length */
+			sc->sense_buffer[8] = 0;   /* Information desc type */
+			sc->sense_buffer[9] = 0xa; /* Additional desc length */
+			sc->sense_buffer[10] = 0x80; /* Validity bit */
+
+			put_unaligned_be64(sector, &sc->sense_buffer[12]);
+			goto out;
+		}
+	}
+
 	if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
 		sc->result = DID_ERROR << 16;
 		goto out;
@@ -1013,13 +1043,13 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected "
 				  "due to DataDigest error.\n",
-				  rejected_pdu.itt, opcode);
+				  opcode, rejected_pdu.itt);
 		break;
 	case ISCSI_REASON_IMM_CMD_REJECT:
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
 				  "immediate commands.\n",
-				  rejected_pdu.itt, opcode);
+				  opcode, rejected_pdu.itt);
 		/*
 		 * We only send one TMF at a time so if the target could not
 		 * handle it, then it should get fixed (RFC mandates that
@@ -1031,14 +1061,19 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (opcode != ISCSI_OP_NOOP_OUT)
 			return 0;
 
-		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
+		 if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG)) {
 			/*
 			 * nop-out in response to target's nop-out rejected.
 			 * Just resend.
 			 */
+			/* In RX path we are under back lock */
+			spin_unlock(&conn->session->back_lock);
+			spin_lock(&conn->session->frwd_lock);
 			iscsi_send_nopout(conn,
 					  (struct iscsi_nopin*)&rejected_pdu);
-		else {
+			spin_unlock(&conn->session->frwd_lock);
+			spin_lock(&conn->session->back_lock);
+		} else {
 			struct iscsi_task *task;
 			/*
 			 * Our nop as ping got dropped. We know the target
@@ -1059,8 +1094,8 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 	default:
 		iscsi_conn_printk(KERN_ERR, conn,
 				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
-				  "code 0x%x\n", rejected_pdu.itt,
-				  rejected_pdu.opcode, reject->reason);
+				  "code 0x%x\n", rejected_pdu.opcode,
+				  rejected_pdu.itt, reject->reason);
 		break;
 	}
 	return rc;
@@ -1074,7 +1109,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
  * This should be used for mgmt tasks like login and nops, or if
  * the LDD's itt space does not include the session age.
  *
- * The session lock must be held.
+ * The session back_lock must be held.
  */
 struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 {
@@ -1103,7 +1138,7 @@ EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
  * @datalen: len of data buffer
  *
  * Completes pdu processing by freeing any resources allocated at
- * queuecommand or send generic. session lock must be held and verify
+ * queuecommand or send generic. session back_lock must be held and verify
  * itt must have been called.
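 * (iscsi_verify_itt() is the helper that performs that verification.)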
*/  int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, @@ -1140,7 +1175,12 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,  			if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))  				break; +			/* In RX path we are under back lock */ +			spin_unlock(&session->back_lock); +			spin_lock(&session->frwd_lock);  			iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr); +			spin_unlock(&session->frwd_lock); +			spin_lock(&session->back_lock);  			break;  		case ISCSI_OP_REJECT:  			rc = iscsi_handle_reject(conn, hdr, data, datalen); @@ -1247,9 +1287,9 @@ int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,  {  	int rc; -	spin_lock(&conn->session->lock); +	spin_lock(&conn->session->back_lock);  	rc = __iscsi_complete_pdu(conn, hdr, data, datalen); -	spin_unlock(&conn->session->lock); +	spin_unlock(&conn->session->back_lock);  	return rc;  }  EXPORT_SYMBOL_GPL(iscsi_complete_pdu); @@ -1293,7 +1333,7 @@ EXPORT_SYMBOL_GPL(iscsi_verify_itt);   *   * This should be used for cmd tasks.   * - * The session lock must be held. + * The session back_lock must be held.   */  struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *conn, itt_t itt)  { @@ -1323,15 +1363,15 @@ void iscsi_session_failure(struct iscsi_session *session,  	struct iscsi_conn *conn;  	struct device *dev; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	conn = session->leadconn;  	if (session->state == ISCSI_STATE_TERMINATE || !conn) { -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		return;  	}  	dev = get_device(&conn->cls_conn->dev); -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	if (!dev)  	        return;  	/* @@ -1351,15 +1391,15 @@ void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)  {  	struct iscsi_session *session = conn->session; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (session->state == ISCSI_STATE_FAILED) { -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		return;  	}  	if (conn->stop_stage == 0)  		session->state = ISCSI_STATE_FAILED; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);  	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); @@ -1393,15 +1433,18 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)  		return -ENODATA;  	__iscsi_get_task(task); -	spin_unlock_bh(&conn->session->lock); +	spin_unlock_bh(&conn->session->frwd_lock);  	rc = conn->session->tt->xmit_task(task); -	spin_lock_bh(&conn->session->lock); +	spin_lock_bh(&conn->session->frwd_lock);  	if (!rc) {  		/* done with this task */  		task->last_xfer = jiffies;  		conn->task = NULL;  	} +	/* regular RX path uses back_lock */ +	spin_lock(&conn->session->back_lock);  	__iscsi_put_task(task); +	spin_unlock(&conn->session->back_lock);  	return rc;  } @@ -1410,7 +1453,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)   * @task: task to requeue   *   * LLDs that need to run a task from the session workqueue should call - * this. The session lock must be held. This should only be called + * this. The session frwd_lock must be held. This should only be called   * by software drivers.   
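 * (i.e. software iSCSI initiators such as iscsi_tcp.)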
*/  void iscsi_requeue_task(struct iscsi_task *task) @@ -1441,10 +1484,10 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)  	struct iscsi_task *task;  	int rc = 0; -	spin_lock_bh(&conn->session->lock); +	spin_lock_bh(&conn->session->frwd_lock);  	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {  		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n"); -		spin_unlock_bh(&conn->session->lock); +		spin_unlock_bh(&conn->session->frwd_lock);  		return -ENODATA;  	} @@ -1465,7 +1508,10 @@ check_mgmt:  					 struct iscsi_task, running);  		list_del_init(&conn->task->running);  		if (iscsi_prep_mgmt_task(conn, conn->task)) { +			/* regular RX path uses back_lock */ +			spin_lock_bh(&conn->session->back_lock);  			__iscsi_put_task(conn->task); +			spin_unlock_bh(&conn->session->back_lock);  			conn->task = NULL;  			continue;  		} @@ -1527,11 +1573,11 @@ check_mgmt:  		if (!list_empty(&conn->mgmtqueue))  			goto check_mgmt;  	} -	spin_unlock_bh(&conn->session->lock); +	spin_unlock_bh(&conn->session->frwd_lock);  	return -ENODATA;  done: -	spin_unlock_bh(&conn->session->lock); +	spin_unlock_bh(&conn->session->frwd_lock);  	return rc;  } @@ -1567,6 +1613,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn,  	task->have_checked_conn = false;  	task->last_timeout = jiffies;  	task->last_xfer = jiffies; +	task->protected = false;  	INIT_LIST_HEAD(&task->running);  	return task;  } @@ -1600,7 +1647,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)  	cls_session = starget_to_session(scsi_target(sc->device));  	session = cls_session->dd_data; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	reason = iscsi_session_chkready(cls_session);  	if (reason) { @@ -1686,13 +1733,13 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)  	}  	session->queued_cmdsn++; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	return 0;  prepd_reject:  	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);  reject: -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",  			  sc->cmnd[0], reason);  	return SCSI_MLQUEUE_TARGET_BUSY; @@ -1700,7 +1747,7 @@ reject:  prepd_fault:  	iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);  fault: -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",  			  sc->cmnd[0], reason);  	if (!scsi_bidi_cmnd(sc)) @@ -1748,14 +1795,14 @@ static void iscsi_tmf_timedout(unsigned long data)  	struct iscsi_conn *conn = (struct iscsi_conn *)data;  	struct iscsi_session *session = conn->session; -	spin_lock(&session->lock); +	spin_lock(&session->frwd_lock);  	if (conn->tmf_state == TMF_QUEUED) {  		conn->tmf_state = TMF_TIMEDOUT;  		ISCSI_DBG_EH(session, "tmf timedout\n");  		/* unblock eh_abort() */  		wake_up(&conn->ehwait);  	} -	spin_unlock(&session->lock); +	spin_unlock(&session->frwd_lock);  }  static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, @@ -1768,10 +1815,10 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,  	task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,  				      NULL, 0);  	if (!task) { -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		iscsi_conn_printk(KERN_ERR, conn, "Could not send TMF.\n");  		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); -		spin_lock_bh(&session->lock); +		spin_lock_bh(&session->frwd_lock);  		return -EPERM;  	}  	
conn->tmfcmd_pdus_cnt++; @@ -1781,7 +1828,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,  	add_timer(&conn->tmf_timer);  	ISCSI_DBG_EH(session, "tmf set timeout\n"); -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	mutex_unlock(&session->eh_mutex);  	/* @@ -1800,7 +1847,7 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,  	del_timer_sync(&conn->tmf_timer);  	mutex_lock(&session->eh_mutex); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	/* if the session drops it will clean up the task */  	if (age != session->age ||  	    session->state != ISCSI_STATE_LOGGED_IN) @@ -1837,7 +1884,7 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,   * iscsi_suspend_queue - suspend iscsi_queuecommand   * @conn: iscsi conn to stop queueing IO on   * - * This grabs the session lock to make sure no one is in + * This grabs the session frwd_lock to make sure no one is in   * xmit_task/queuecommand, and then sets suspend to prevent   * new commands from being queued. This only needs to be called   * by offload drivers that need to sync a path like ep disconnect @@ -1846,9 +1893,9 @@ static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,   */  void iscsi_suspend_queue(struct iscsi_conn *conn)  { -	spin_lock_bh(&conn->session->lock); +	spin_lock_bh(&conn->session->frwd_lock);  	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); -	spin_unlock_bh(&conn->session->lock); +	spin_unlock_bh(&conn->session->frwd_lock);  }  EXPORT_SYMBOL_GPL(iscsi_suspend_queue); @@ -1907,7 +1954,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)  	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); -	spin_lock(&session->lock); +	spin_lock(&session->frwd_lock);  	task = (struct iscsi_task *)sc->SCp.ptr;  	if (!task) {  		/* @@ -2021,7 +2068,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)  done:  	if (task)  		task->last_timeout = jiffies; -	spin_unlock(&session->lock); +	spin_unlock(&session->frwd_lock);  	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?  		     "timer reset" : "nh");  	return rc; @@ -2033,7 +2080,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)  	struct iscsi_session *session = conn->session;  	unsigned long recv_timeout, next_timeout = 0, last_recv; -	spin_lock(&session->lock); +	spin_lock(&session->frwd_lock);  	if (session->state != ISCSI_STATE_LOGGED_IN)  		goto done; @@ -2050,7 +2097,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)  				  "last ping %lu, now %lu\n",  				  conn->ping_timeout, conn->recv_timeout,  				  last_recv, conn->last_ping, jiffies); -		spin_unlock(&session->lock); +		spin_unlock(&session->frwd_lock);  		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);  		return;  	} @@ -2066,7 +2113,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)  	ISCSI_DBG_CONN(conn, "Setting next tmo %lu\n", next_timeout);  	mod_timer(&conn->transport_timer, next_timeout);  done: -	spin_unlock(&session->lock); +	spin_unlock(&session->frwd_lock);  }  static void iscsi_prep_abort_task_pdu(struct iscsi_task *task, @@ -2096,7 +2143,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)  	ISCSI_DBG_EH(session, "aborting sc %p\n", sc);  	mutex_lock(&session->eh_mutex); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	/*  	 * if session was ISCSI_STATE_IN_RECOVERY then we may not have  	 * got the command. 
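
A note on the locking conversion running through these libiscsi hunks: the
old single session->lock is split into session->frwd_lock, which covers the
submission side (queuecommand, TMF issue, the xmit thread), and
session->back_lock, which covers PDU completion and task release. A minimal
sketch of the pattern, with all demo_* names hypothetical and not part of
this patch:

	#include <linux/spinlock.h>

	struct demo_session {
		spinlock_t frwd_lock;	/* submission-side state */
		spinlock_t back_lock;	/* completion-side state */
		unsigned int queued;	/* protected by frwd_lock */
		unsigned int completed;	/* protected by back_lock */
	};

	/* TX path: touches only submission state under frwd_lock */
	static void demo_queue(struct demo_session *s)
	{
		spin_lock_bh(&s->frwd_lock);
		s->queued++;
		spin_unlock_bh(&s->frwd_lock);
	}

	/*
	 * RX path: touches only completion state under back_lock,
	 * so it no longer contends with demo_queue().
	 */
	static void demo_complete(struct demo_session *s)
	{
		spin_lock_bh(&s->back_lock);
		s->completed++;
		spin_unlock_bh(&s->back_lock);
	}

When the RX path must touch submission state (e.g. the iscsi_send_nopout()
call sites above), the patch first drops back_lock, takes frwd_lock, and
re-acquires back_lock afterwards, preserving a frwd_lock-then-back_lock
ordering throughout.
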
@@ -2104,7 +2151,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)  	if (!sc->SCp.ptr) {  		ISCSI_DBG_EH(session, "sc never reached iscsi layer or "  				      "it completed.\n"); -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		mutex_unlock(&session->eh_mutex);  		return SUCCESS;  	} @@ -2115,7 +2162,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)  	 */  	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||  	    sc->SCp.phase != session->age) { -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		mutex_unlock(&session->eh_mutex);  		ISCSI_DBG_EH(session, "failing abort due to dropped "  				  "session.\n"); @@ -2156,7 +2203,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)  	switch (conn->tmf_state) {  	case TMF_SUCCESS: -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		/*  		 * stop tx side in case the target had sent an abort rsp but  		 * the initiator was still writing out data. @@ -2167,15 +2214,15 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)  		 * good and have never sent us a successful tmf response  		 * then sent more data for the cmd.  		 */ -		spin_lock_bh(&session->lock); +		spin_lock_bh(&session->frwd_lock);  		fail_scsi_task(task, DID_ABORT);  		conn->tmf_state = TMF_INITIAL;  		memset(hdr, 0, sizeof(*hdr)); -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		iscsi_start_tx(conn);  		goto success_unlocked;  	case TMF_TIMEDOUT: -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);  		goto failed_unlocked;  	case TMF_NOT_FOUND: @@ -2194,7 +2241,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)  	}  success: -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  success_unlocked:  	ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",  		     sc, task->itt); @@ -2202,7 +2249,7 @@ success_unlocked:  	return SUCCESS;  failed: -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  failed_unlocked:  	ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,  		     task ? task->itt : 0); @@ -2235,7 +2282,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)  	ISCSI_DBG_EH(session, "LU Reset [sc %p lun %u]\n", sc, sc->device->lun);  	mutex_lock(&session->eh_mutex); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	/*  	 * Just check if we are not logged in. We cannot check for  	 * the phase because the reset could come from an ioctl. @@ -2262,7 +2309,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)  	case TMF_SUCCESS:  		break;  	case TMF_TIMEDOUT: -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);  		goto done;  	default: @@ -2271,21 +2318,21 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)  	}  	rc = SUCCESS; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	iscsi_suspend_tx(conn); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	memset(hdr, 0, sizeof(*hdr));  	fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);  	conn->tmf_state = TMF_INITIAL; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	iscsi_start_tx(conn);  	goto done;  unlock: -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  done:  	ISCSI_DBG_EH(session, "dev reset result = %s\n",  		     rc == SUCCESS ? 
"SUCCESS" : "FAILED"); @@ -2298,13 +2345,13 @@ void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)  {  	struct iscsi_session *session = cls_session->dd_data; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (session->state != ISCSI_STATE_LOGGED_IN) {  		session->state = ISCSI_STATE_RECOVERY_FAILED;  		if (session->leadconn)  			wake_up(&session->leadconn->ehwait);  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  }  EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout); @@ -2326,19 +2373,19 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)  	conn = session->leadconn;  	mutex_lock(&session->eh_mutex); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (session->state == ISCSI_STATE_TERMINATE) {  failed:  		ISCSI_DBG_EH(session,  			     "failing session reset: Could not log back into "  			     "%s, %s [age %d]\n", session->targetname,  			     conn->persistent_address, session->age); -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		mutex_unlock(&session->eh_mutex);  		return FAILED;  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	mutex_unlock(&session->eh_mutex);  	/*  	 * we drop the lock here but the leadconn cannot be destoyed while @@ -2355,14 +2402,14 @@ failed:  		flush_signals(current);  	mutex_lock(&session->eh_mutex); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (session->state == ISCSI_STATE_LOGGED_IN) {  		ISCSI_DBG_EH(session,  			     "session reset succeeded for %s,%s\n",  			     session->targetname, conn->persistent_address);  	} else  		goto failed; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	mutex_unlock(&session->eh_mutex);  	return SUCCESS;  } @@ -2398,7 +2445,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)  		     session->targetname);  	mutex_lock(&session->eh_mutex); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	/*  	 * Just check if we are not logged in. We cannot check for  	 * the phase because the reset could come from a ioctl. @@ -2425,7 +2472,7 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)  	case TMF_SUCCESS:  		break;  	case TMF_TIMEDOUT: -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		iscsi_conn_failure(conn, ISCSI_ERR_SCSI_EH_SESSION_RST);  		goto done;  	default: @@ -2434,21 +2481,21 @@ int iscsi_eh_target_reset(struct scsi_cmnd *sc)  	}  	rc = SUCCESS; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	iscsi_suspend_tx(conn); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	memset(hdr, 0, sizeof(*hdr));  	fail_scsi_tasks(conn, -1, DID_ERROR);  	conn->tmf_state = TMF_INITIAL; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	iscsi_start_tx(conn);  	goto done;  unlock: -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  done:  	ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,  		     rc == SUCCESS ? 
"SUCCESS" : "FAILED"); @@ -2746,8 +2793,10 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,  	session->max_r2t = 1;  	session->tt = iscsit;  	session->dd_data = cls_session->dd_data + sizeof(*session); +  	mutex_init(&session->eh_mutex); -	spin_lock_init(&session->lock); +	spin_lock_init(&session->frwd_lock); +	spin_lock_init(&session->back_lock);  	/* initialize SCSI PDU commands pool */  	if (iscsi_pool_init(&session->cmdpool, session->cmds_max, @@ -2861,14 +2910,14 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,  	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);  	/* allocate login_task used for the login/text sequences */ -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (!kfifo_out(&session->cmdpool.queue,                           (void*)&conn->login_task,  			 sizeof(void*))) { -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		goto login_task_alloc_fail;  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	data = (char *) __get_free_pages(GFP_KERNEL,  					 get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); @@ -2905,7 +2954,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)  	del_timer_sync(&conn->transport_timer); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;  	if (session->leadconn == conn) {  		/* @@ -2914,7 +2963,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)  		session->state = ISCSI_STATE_TERMINATE;  		wake_up(&conn->ehwait);  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	/*  	 * Block until all in-progress commands for this connection @@ -2941,15 +2990,19 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)  	/* flush queued up work because we free the connection below */  	iscsi_suspend_tx(conn); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	free_pages((unsigned long) conn->data,  		   get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));  	kfree(conn->persistent_address); +	kfree(conn->local_ipaddr); +	/* regular RX path uses back_lock */ +	spin_lock_bh(&session->back_lock);  	kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task,  		    sizeof(void*)); +	spin_unlock_bh(&session->back_lock);  	if (session->leadconn == conn)  		session->leadconn = NULL; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	iscsi_destroy_conn(cls_conn);  } @@ -2986,7 +3039,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)  		conn->ping_timeout = 5;  	} -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	conn->c_stage = ISCSI_CONN_STARTED;  	session->state = ISCSI_STATE_LOGGED_IN;  	session->queued_cmdsn = session->cmdsn; @@ -3015,7 +3068,7 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)  	default:  		break;  	} -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	iscsi_unblock_session(session->cls_session);  	wake_up(&conn->ehwait); @@ -3054,9 +3107,9 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,  	int old_stop_stage;  	mutex_lock(&session->eh_mutex); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (conn->stop_stage == STOP_CONN_TERM) { -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&session->frwd_lock);  		mutex_unlock(&session->eh_mutex);  		return;  	} @@ -3073,14 +3126,14 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,  	old_stop_stage = conn->stop_stage;  	
conn->stop_stage = flag; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	del_timer_sync(&conn->transport_timer);  	iscsi_suspend_tx(conn); -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	conn->c_stage = ISCSI_CONN_STOPPED; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	/*  	 * for connection level recovery we should not calculate @@ -3101,11 +3154,11 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,  	/*  	 * flush queues.  	 */ -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);  	fail_mgmt_tasks(session, conn);  	memset(&conn->tmhdr, 0, sizeof(conn->tmhdr)); -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	mutex_unlock(&session->eh_mutex);  } @@ -3132,10 +3185,10 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,  	struct iscsi_session *session = cls_session->dd_data;  	struct iscsi_conn *conn = cls_conn->dd_data; -	spin_lock_bh(&session->lock); +	spin_lock_bh(&session->frwd_lock);  	if (is_leading)  		session->leadconn = conn; -	spin_unlock_bh(&session->lock); +	spin_unlock_bh(&session->frwd_lock);  	/*  	 * Unblock xmitworker(), Login Phase will pass through. @@ -3269,6 +3322,8 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,  		sscanf(buf, "%d", &val);  		session->discovery_sess = !!val;  		break; +	case ISCSI_PARAM_LOCAL_IPADDR: +		return iscsi_switch_str_param(&conn->local_ipaddr, buf);  	default:  		return -ENOSYS;  	} @@ -3542,6 +3597,9 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,  	case ISCSI_PARAM_TCP_RECV_WSF:  		len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);  		break; +	case ISCSI_PARAM_LOCAL_IPADDR: +		len = sprintf(buf, "%s\n", conn->local_ipaddr); +		break;  	default:  		return -ENOSYS;  	} diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 1d58d533601..60cb6dc3c6f 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c @@ -446,7 +446,7 @@ iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)   * iscsi_tcp_cleanup_task - free tcp_task resources   * @task: iscsi task   * - * must be called with session lock + * must be called with session back_lock   */  void iscsi_tcp_cleanup_task(struct iscsi_task *task)  { @@ -457,6 +457,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)  	if (!task->sc)  		return; +	spin_lock_bh(&tcp_task->queue2pool);  	/* flush task's r2t queues */  	while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {  		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, @@ -470,6 +471,7 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)  			    sizeof(void*));  		tcp_task->r2t = NULL;  	} +	spin_unlock_bh(&tcp_task->queue2pool);  }  EXPORT_SYMBOL_GPL(iscsi_tcp_cleanup_task); @@ -529,6 +531,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)  	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;  	struct iscsi_r2t_info *r2t;  	int r2tsn = be32_to_cpu(rhdr->r2tsn); +	u32 data_length; +	u32 data_offset;  	int rc;  	if (tcp_conn->in.datalen) { @@ -554,40 +558,41 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)  		return 0;  	} -	rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); -	if (!rc) { -		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. 
" -				  "Target has sent more R2Ts than it " -				  "negotiated for or driver has leaked.\n"); -		return ISCSI_ERR_PROTO; -	} - -	r2t->exp_statsn = rhdr->statsn; -	r2t->data_length = be32_to_cpu(rhdr->data_length); -	if (r2t->data_length == 0) { +	data_length = be32_to_cpu(rhdr->data_length); +	if (data_length == 0) {  		iscsi_conn_printk(KERN_ERR, conn,  				  "invalid R2T with zero data len\n"); -		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, -			    sizeof(void*));  		return ISCSI_ERR_DATALEN;  	} -	if (r2t->data_length > session->max_burst) +	if (data_length > session->max_burst)  		ISCSI_DBG_TCP(conn, "invalid R2T with data len %u and max "  			      "burst %u. Attempting to execute request.\n", -			      r2t->data_length, session->max_burst); +			      data_length, session->max_burst); -	r2t->data_offset = be32_to_cpu(rhdr->data_offset); -	if (r2t->data_offset + r2t->data_length > scsi_out(task->sc)->length) { +	data_offset = be32_to_cpu(rhdr->data_offset); +	if (data_offset + data_length > scsi_out(task->sc)->length) {  		iscsi_conn_printk(KERN_ERR, conn,  				  "invalid R2T with data len %u at offset %u " -				  "and total length %d\n", r2t->data_length, -				  r2t->data_offset, scsi_out(task->sc)->length); -		kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, -			    sizeof(void*)); +				  "and total length %d\n", data_length, +				  data_offset, scsi_out(task->sc)->length);  		return ISCSI_ERR_DATALEN;  	} +	spin_lock(&tcp_task->pool2queue); +	rc = kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t, sizeof(void *)); +	if (!rc) { +		iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " +				  "Target has sent more R2Ts than it " +				  "negotiated for or driver has leaked.\n"); +		spin_unlock(&tcp_task->pool2queue); +		return ISCSI_ERR_PROTO; +	} + +	r2t->exp_statsn = rhdr->statsn; +	r2t->data_length = data_length; +	r2t->data_offset = data_offset; +  	r2t->ttt = rhdr->ttt; /* no flip */  	r2t->datasn = 0;  	r2t->sent = 0; @@ -595,6 +600,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)  	tcp_task->exp_datasn = r2tsn + 1;  	kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));  	conn->r2t_pdus_cnt++; +	spin_unlock(&tcp_task->pool2queue);  	iscsi_requeue_task(task);  	return 0; @@ -667,14 +673,14 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)  	switch(opcode) {  	case ISCSI_OP_SCSI_DATA_IN: -		spin_lock(&conn->session->lock); +		spin_lock(&conn->session->back_lock);  		task = iscsi_itt_to_ctask(conn, hdr->itt);  		if (!task)  			rc = ISCSI_ERR_BAD_ITT;  		else  			rc = iscsi_tcp_data_in(conn, task);  		if (rc) { -			spin_unlock(&conn->session->lock); +			spin_unlock(&conn->session->back_lock);  			break;  		} @@ -707,11 +713,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)  						   tcp_conn->in.datalen,  						   iscsi_tcp_process_data_in,  						   rx_hash); -			spin_unlock(&conn->session->lock); +			spin_unlock(&conn->session->back_lock);  			return rc;  		}  		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0); -		spin_unlock(&conn->session->lock); +		spin_unlock(&conn->session->back_lock);  		break;  	case ISCSI_OP_SCSI_CMD_RSP:  		if (tcp_conn->in.datalen) { @@ -721,18 +727,20 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)  		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);  		break;  	case ISCSI_OP_R2T: -		spin_lock(&conn->session->lock); +		spin_lock(&conn->session->back_lock);  		task = iscsi_itt_to_ctask(conn, hdr->itt); +		
spin_unlock(&conn->session->back_lock);  		if (!task)  			rc = ISCSI_ERR_BAD_ITT;  		else if (ahslen)  			rc = ISCSI_ERR_AHSLEN;  		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {  			task->last_xfer = jiffies; +			spin_lock(&conn->session->frwd_lock);  			rc = iscsi_tcp_r2t_rsp(conn, task); +			spin_unlock(&conn->session->frwd_lock);  		} else  			rc = ISCSI_ERR_PROTO; -		spin_unlock(&conn->session->lock);  		break;  	case ISCSI_OP_LOGIN_RSP:  	case ISCSI_OP_TEXT_RSP: @@ -980,14 +988,13 @@ EXPORT_SYMBOL_GPL(iscsi_tcp_task_init);  static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)  { -	struct iscsi_session *session = task->conn->session;  	struct iscsi_tcp_task *tcp_task = task->dd_data;  	struct iscsi_r2t_info *r2t = NULL;  	if (iscsi_task_has_unsol_data(task))  		r2t = &task->unsol_r2t;  	else { -		spin_lock_bh(&session->lock); +		spin_lock_bh(&tcp_task->queue2pool);  		if (tcp_task->r2t) {  			r2t = tcp_task->r2t;  			/* Continue with this R2T? */ @@ -1009,7 +1016,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task)  			else  				r2t = tcp_task->r2t;  		} -		spin_unlock_bh(&session->lock); +		spin_unlock_bh(&tcp_task->queue2pool);  	}  	return r2t; @@ -1139,6 +1146,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session)  			iscsi_pool_free(&tcp_task->r2tpool);  			goto r2t_alloc_fail;  		} +		spin_lock_init(&tcp_task->pool2queue); +		spin_lock_init(&tcp_task->queue2pool);  	}  	return 0; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 161c98efade..766098af4eb 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -211,7 +211,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)  		qc->tf.nsect = 0;  	} -	ata_tf_to_fis(&qc->tf, 1, 0, (u8*)&task->ata_task.fis); +	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);  	task->uldd_task = qc;  	if (ata_is_atapi(qc->tf.protocol)) {  		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len); @@ -700,46 +700,26 @@ void sas_probe_sata(struct asd_sas_port *port)  } -static bool sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func) +static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)  {  	struct domain_device *dev, *n; -	bool retry = false;  	list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) { -		int rc; -  		if (!dev_is_sata(dev))  			continue;  		sas_ata_wait_eh(dev); -		rc = dev->sata_dev.pm_result; -		if (rc == -EAGAIN) -			retry = true; -		else if (rc) { -			/* since we don't have a -			 * ->port_{suspend|resume} routine in our -			 *  ata_port ops, and no entanglements with -			 *  acpi, suspend should just be mechanical trip -			 *  through eh, catch cases where these -			 *  assumptions are invalidated -			 */ -			WARN_ONCE(1, "failed %s %s error: %d\n", func, -				 dev_name(&dev->rphy->dev), rc); -		}  		/* if libata failed to power manage the device, tear it down */  		if (ata_dev_disabled(sas_to_ata_dev(dev)))  			sas_fail_probe(dev, func, -ENODEV);  	} - -	return retry;  }  void sas_suspend_sata(struct asd_sas_port *port)  {  	struct domain_device *dev; - retry:  	mutex_lock(&port->ha->disco_mutex);  	list_for_each_entry(dev, &port->dev_list, dev_list_node) {  		struct sata_device *sata; @@ -751,20 +731,17 @@ void sas_suspend_sata(struct asd_sas_port *port)  		if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)  			continue; -		sata->pm_result = -EIO; -		ata_sas_port_async_suspend(sata->ap, &sata->pm_result); +		
ata_sas_port_suspend(sata->ap);  	}  	mutex_unlock(&port->ha->disco_mutex); -	if (sas_ata_flush_pm_eh(port, __func__)) -		goto retry; +	sas_ata_flush_pm_eh(port, __func__);  }  void sas_resume_sata(struct asd_sas_port *port)  {  	struct domain_device *dev; - retry:  	mutex_lock(&port->ha->disco_mutex);  	list_for_each_entry(dev, &port->dev_list, dev_list_node) {  		struct sata_device *sata; @@ -776,13 +753,11 @@ void sas_resume_sata(struct asd_sas_port *port)  		if (sata->ap->pm_mesg.event == PM_EVENT_ON)  			continue; -		sata->pm_result = -EIO; -		ata_sas_port_async_resume(sata->ap, &sata->pm_result); +		ata_sas_port_resume(sata->ap);  	}  	mutex_unlock(&port->ha->disco_mutex); -	if (sas_ata_flush_pm_eh(port, __func__)) -		goto retry; +	sas_ata_flush_pm_eh(port, __func__);  }  /** diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 446b85110a1..0cac7d8fd0f 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	}  	/* do we need to support multiple segments? */ -	if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { -		printk("%s: multiple segments req %u %u, rsp %u %u\n", -		       __func__, bio_segments(req->bio), blk_rq_bytes(req), -		       bio_segments(rsp->bio), blk_rq_bytes(rsp)); +	if (bio_multiple_segments(req->bio) || +	    bio_multiple_segments(rsp->bio)) { +		printk("%s: multiple segments req %u, rsp %u\n", +		       __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));  		return -EINVAL;  	} diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index da3aee17faa..25d0f127424 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -862,7 +862,7 @@ out:  enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)  { -	scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd); +	scmd_dbg(cmd, "command %p timed out\n", cmd);  	return BLK_EH_NOT_HANDLED;  } diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 4e1b75ca745..434e9037908 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        
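Back in the libiscsi_tcp hunks above, the session-wide lock around the per-task R2T kfifos is replaced by two task-local spinlocks, initialized in iscsi_tcp_r2tpool_alloc(): pool2queue covers the RX side that reserves a descriptor from r2tpool and publishes it on r2tqueue, while queue2pool covers the TX/cleanup side that consumes r2tqueue and recycles descriptors. A schematic view of the two directions (illustrative fragments, not the exact driver code):

	/* RX: an R2T PDU arrived -- reserve a descriptor and publish it */
	spin_lock(&tcp_task->pool2queue);
	if (kfifo_out(&tcp_task->r2tpool.queue, (void *)&r2t,
		      sizeof(void *))) {
		r2t->data_length = data_length;	/* validated beforehand */
		r2t->data_offset = data_offset;
		kfifo_in(&tcp_task->r2tqueue, (void *)&r2t, sizeof(void *));
	}
	spin_unlock(&tcp_task->pool2queue);

	/* TX/cleanup: drain published R2Ts back into the pool */
	spin_lock_bh(&tcp_task->queue2pool);
	while (kfifo_out(&tcp_task->r2tqueue, (void *)&r2t, sizeof(void *)))
		kfifo_in(&tcp_task->r2tpool.queue, (void *)&r2t,
			 sizeof(void *));
	spin_unlock_bh(&tcp_task->queue2pool);

The reordering in iscsi_tcp_r2t_rsp() supports this: the zero-length and offset-bounds checks now run before a descriptor is taken from the pool, so the error paths no longer have to hand one back.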
*   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -73,8 +73,6 @@ struct lpfc_sli2_slim;   */  /* 1 Second */  #define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1)) -/* 5 minutes */ -#define QUEUE_RAMP_UP_INTERVAL		(msecs_to_jiffies(1000 * 300))  /* Number of exchanges reserved for discovery to complete */  #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -642,6 +640,7 @@ struct lpfc_hba {  #define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */  #define HBA_RRQ_ACTIVE		0x4000 /* process the rrq active list */  #define HBA_FCP_IOQ_FLUSH	0x8000 /* FCP I/O queues being flushed */ +#define HBA_FW_DUMP_OP		0x10000 /* Skips fn reset before FW dump */  	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/  	struct lpfc_dmabuf slim2p; @@ -722,6 +721,20 @@ struct lpfc_hba {  	uint32_t cfg_hba_queue_depth;  	uint32_t cfg_enable_hba_reset;  	uint32_t cfg_enable_hba_heartbeat; +	uint32_t cfg_fof; +	uint32_t cfg_EnableXLane; +	uint8_t cfg_oas_tgt_wwpn[8]; +	uint8_t cfg_oas_vpt_wwpn[8]; +	uint32_t cfg_oas_lun_state; +#define OAS_LUN_ENABLE	1 +#define OAS_LUN_DISABLE	0 +	uint32_t cfg_oas_lun_status; +#define OAS_LUN_STATUS_EXISTS	0x01 +	uint32_t cfg_oas_flags; +#define OAS_FIND_ANY_VPORT	0x01 +#define OAS_FIND_ANY_TARGET	0x02 +#define OAS_LUN_VALID	0x04 +	uint32_t cfg_XLanePriority;  	uint32_t cfg_enable_bg;  	uint32_t cfg_hostmem_hgp;  	uint32_t cfg_log_verbose; @@ -730,6 +743,7 @@ struct lpfc_hba {  	uint32_t cfg_request_firmware_upgrade;  	uint32_t cfg_iocb_cnt;  	uint32_t cfg_suppress_link_up; +	uint32_t cfg_rrq_xri_bitmap_sz;  #define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */  #define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */  #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */ @@ -835,6 +849,7 @@ struct lpfc_hba {  	mempool_t *mbox_mem_pool;  	mempool_t *nlp_mem_pool;  	mempool_t *rrq_pool; +	mempool_t *active_rrq_pool;  	struct fc_host_statistics link_stats;  	enum intr_type_t intr_type; @@ -869,7 +884,6 @@ struct lpfc_hba {  	atomic_t num_cmd_success;  	unsigned long last_rsrc_error_time;  	unsigned long last_ramp_down_time; -	unsigned long last_ramp_up_time;  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS  	struct dentry *hba_debugfs_root;  	atomic_t debugfs_vport_count; @@ -971,6 +985,9 @@ struct lpfc_hba {  	atomic_t sdev_cnt;  	uint8_t fips_spec_rev;  	uint8_t fips_level; +	spinlock_t devicelock;	/* lock for luns list */ +	mempool_t *device_data_mem_pool; +	struct list_head luns;  };  static inline struct Scsi_Host * diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 00656fc92b9..1d7a5c34ee8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        
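The new lpfc_hba fields above carry the Optimized Access Storage (OAS, Emulex's Express Lane/XLane feature) configuration: target and vport WWPNs, a lun enable state, and the cfg_oas_flags word whose bits are defined beside it. A small sketch of the flag bookkeeping those bits imply, mirroring how the sysfs stores later in this patch use them (assumed usage pattern, not verbatim driver code):

	/* a zero WWPN acts as a wildcard for the lun walk */
	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0)
		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET;
	else
		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET;

	/* changing the selection invalidates the cached lun cursor */
	phba->cfg_oas_flags &= ~OAS_LUN_VALID;
	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN;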
*   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -529,6 +529,27 @@ lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,  }  /** + * lpfc_oas_supported_show - Return whether or not Optimized Access Storage + *			    (OAS) is supported. + * @dev: class unused variable. + * @attr: device attribute, not used. + * @buf: on return contains the module description text. + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_oas_supported_show(struct device *dev, struct device_attribute *attr, +			char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; +	struct lpfc_hba *phba = vport->phba; + +	return snprintf(buf, PAGE_SIZE, "%d\n", +			phba->sli4_hba.pc_sli4_params.oas_supported); +} + +/**   * lpfc_link_state_store - Transition the link_state on an HBA port   * @dev: class device that is converted into a Scsi_host.   * @attr: device attribute, not used. @@ -898,10 +919,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)  		phba->cfg_sriov_nr_virtfn = 0;  	} +	if (opcode == LPFC_FW_DUMP) +		phba->hba_flag |= HBA_FW_DUMP_OP; +  	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); -	if (status != 0) +	if (status != 0) { +		phba->hba_flag &= ~HBA_FW_DUMP_OP;  		return status; +	}  	/* wait for the device to be quiesced before firmware reset */  	msleep(100); @@ -2041,9 +2067,53 @@ static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);  static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,  		   lpfc_sriov_hw_max_virtfn_show, NULL);  static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL); +static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show, +		   NULL);  static char *lpfc_soft_wwn_key = "C99G71SL8032A"; +#define WWN_SZ 8 +/** + * lpfc_wwn_set - Convert string to the 8 byte WWN value. + * @buf: WWN string. + * @cnt: Length of string. + * @wwn: Array to receive converted wwn value. + * + * Returns: + * -EINVAL if the buffer does not contain a valid wwn + * 0 success + **/ +static size_t +lpfc_wwn_set(const char *buf, size_t cnt, char wwn[]) +{ +	unsigned int i, j; + +	/* Count may include a LF at end of string */ +	if (buf[cnt-1] == '\n') +		cnt--; + +	if ((cnt < 16) || (cnt > 18) || ((cnt == 17) && (*buf++ != 'x')) || +	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) +		return -EINVAL; + +	memset(wwn, 0, WWN_SZ); +	/* Validate and store the new name */ +	for (i = 0, j = 0; i < 16; i++) { +		if ((*buf >= 'a') && (*buf <= 'f')) +			j = ((j << 4) | ((*buf++ - 'a') + 10)); +		else if ((*buf >= 'A') && (*buf <= 'F')) +			j = ((j << 4) | ((*buf++ - 'A') + 10)); +		else if ((*buf >= '0') && (*buf <= '9')) +			j = ((j << 4) | (*buf++ - '0')); +		else +			return -EINVAL; +		if (i % 2) { +			wwn[i/2] = j & 0xff; +			j = 0; +		} +	} +	return 0; +}  /**   * lpfc_soft_wwn_enable_store - Allows setting of the wwn if the key is valid   * @dev: class device that is converted into a Scsi_host. 
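lpfc_wwn_set() above centralizes the WWN parsing that was previously duplicated in the soft_wwpn and soft_wwnn stores: 16 hex digits, optionally prefixed with "x" or "0x", with an optional trailing newline from sysfs. A standalone userspace rendering of the same rule (demo code for illustration, not the driver's function):

	#include <stdio.h>
	#include <string.h>

	/* parse "0x..."/"x..."/bare 16-hex-digit WWN into 8 bytes;
	 * returns 0 on success, -1 on malformed input */
	static int wwn_parse(const char *buf, size_t cnt, unsigned char wwn[8])
	{
		unsigned int i, j = 0;

		if (cnt && buf[cnt - 1] == '\n')	/* sysfs may append '\n' */
			cnt--;
		if (cnt < 16 || cnt > 18 ||
		    (cnt == 17 && *buf++ != 'x') ||
		    (cnt == 18 && (*buf++ != '0' || *buf++ != 'x')))
			return -1;

		memset(wwn, 0, 8);
		for (i = 0; i < 16; i++) {
			char c = *buf++;
			int v;

			if (c >= '0' && c <= '9')
				v = c - '0';
			else if (c >= 'a' && c <= 'f')
				v = c - 'a' + 10;
			else if (c >= 'A' && c <= 'F')
				v = c - 'A' + 10;
			else
				return -1;
			j = (j << 4) | v;	/* accumulate two nibbles */
			if (i % 2) {
				wwn[i / 2] = j & 0xff;
				j = 0;
			}
		}
		return 0;
	}

	int main(void)
	{
		unsigned char wwn[8];

		if (wwn_parse("0x10000000c9abcdef\n", 19, wwn) == 0)
			printf("%02x...%02x\n", wwn[0], wwn[7]);	/* 10...ef */
		return 0;
	}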
@@ -2132,9 +2202,9 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;  	struct lpfc_hba   *phba = vport->phba;  	struct completion online_compl; -	int stat1=0, stat2=0; -	unsigned int i, j, cnt=count; -	u8 wwpn[8]; +	int stat1 = 0, stat2 = 0; +	unsigned int cnt = count; +	u8 wwpn[WWN_SZ];  	int rc;  	if (!phba->cfg_enable_hba_reset) @@ -2149,29 +2219,19 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,  	if (buf[cnt-1] == '\n')  		cnt--; -	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || -	    ((cnt == 17) && (*buf++ != 'x')) || -	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) +	if (!phba->soft_wwn_enable)  		return -EINVAL; +	/* lock setting wwpn, wwnn down */  	phba->soft_wwn_enable = 0; -	memset(wwpn, 0, sizeof(wwpn)); - -	/* Validate and store the new name */ -	for (i=0, j=0; i < 16; i++) { -		int value; - -		value = hex_to_bin(*buf++); -		if (value >= 0) -			j = (j << 4) | value; -		else -			return -EINVAL; -		if (i % 2) { -			wwpn[i/2] = j & 0xff; -			j = 0; -		} +	rc = lpfc_wwn_set(buf, cnt, wwpn); +	if (rc) { +		/* not able to set wwpn, unlock it */ +		phba->soft_wwn_enable = 1; +		return rc;  	} +  	phba->cfg_soft_wwpn = wwn_to_u64(wwpn);  	fc_host_port_name(shost) = phba->cfg_soft_wwpn;  	if (phba->cfg_soft_wwnn) @@ -2198,7 +2258,7 @@ lpfc_soft_wwpn_store(struct device *dev, struct device_attribute *attr,  				"reinit adapter - %d\n", stat2);  	return (stat1 || stat2) ? -EIO : count;  } -static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,\ +static DEVICE_ATTR(lpfc_soft_wwpn, S_IRUGO | S_IWUSR,  		   lpfc_soft_wwpn_show, lpfc_soft_wwpn_store);  /** @@ -2235,39 +2295,25 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,  {  	struct Scsi_Host *shost = class_to_shost(dev);  	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; -	unsigned int i, j, cnt=count; -	u8 wwnn[8]; +	unsigned int cnt = count; +	u8 wwnn[WWN_SZ]; +	int rc;  	/* count may include a LF at end of string */  	if (buf[cnt-1] == '\n')  		cnt--; -	if (!phba->soft_wwn_enable || (cnt < 16) || (cnt > 18) || -	    ((cnt == 17) && (*buf++ != 'x')) || -	    ((cnt == 18) && ((*buf++ != '0') || (*buf++ != 'x')))) +	if (!phba->soft_wwn_enable)  		return -EINVAL; -	/* -	 * Allow wwnn to be set many times, as long as the enable is set. -	 * However, once the wwpn is set, everything locks. -	 */ - -	memset(wwnn, 0, sizeof(wwnn)); - -	/* Validate and store the new name */ -	for (i=0, j=0; i < 16; i++) { -		int value; - -		value = hex_to_bin(*buf++); -		if (value >= 0) -			j = (j << 4) | value; -		else -			return -EINVAL; -		if (i % 2) { -			wwnn[i/2] = j & 0xff; -			j = 0; -		} +	rc = lpfc_wwn_set(buf, cnt, wwnn); +	if (rc) { +		/* Allow wwnn to be set many times, as long as the enable +		 * is set. However, once the wwpn is set, everything locks. +		 */ +		return rc;  	} +  	phba->cfg_soft_wwnn = wwn_to_u64(wwnn);  	dev_printk(KERN_NOTICE, &phba->pcidev->dev, @@ -2276,9 +2322,438 @@ lpfc_soft_wwnn_store(struct device *dev, struct device_attribute *attr,  	return count;  } -static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,\ +static DEVICE_ATTR(lpfc_soft_wwnn, S_IRUGO | S_IWUSR,  		   lpfc_soft_wwnn_show, lpfc_soft_wwnn_store); +/** + * lpfc_oas_tgt_show - Return wwpn of target whose luns may be enabled for + *		      Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. 
+ * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * value of count + **/ +static ssize_t +lpfc_oas_tgt_show(struct device *dev, struct device_attribute *attr, +		  char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + +	return snprintf(buf, PAGE_SIZE, "0x%llx\n", +			wwn_to_u64(phba->cfg_oas_tgt_wwpn)); +} + +/** + * lpfc_oas_tgt_store - Store wwpn of target whose luns may be enabled for + *		      Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_tgt_store(struct device *dev, struct device_attribute *attr, +		   const char *buf, size_t count) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; +	unsigned int cnt = count; +	uint8_t wwpn[WWN_SZ]; +	int rc; + +	if (!phba->cfg_fof) +		return -EPERM; + +	/* count may include a LF at end of string */ +	if (buf[cnt-1] == '\n') +		cnt--; + +	rc = lpfc_wwn_set(buf, cnt, wwpn); +	if (rc) +		return rc; + +	memcpy(phba->cfg_oas_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); +	memcpy(phba->sli4_hba.oas_next_tgt_wwpn, wwpn, (8 * sizeof(uint8_t))); +	if (wwn_to_u64(wwpn) == 0) +		phba->cfg_oas_flags |= OAS_FIND_ANY_TARGET; +	else +		phba->cfg_oas_flags &= ~OAS_FIND_ANY_TARGET; +	phba->cfg_oas_flags &= ~OAS_LUN_VALID; +	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; +	return count; +} +static DEVICE_ATTR(lpfc_xlane_tgt, S_IRUGO | S_IWUSR, +		   lpfc_oas_tgt_show, lpfc_oas_tgt_store); + +/** + * lpfc_oas_vpt_show - Return wwpn of vport whose targets may be enabled + *		      for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_show(struct device *dev, struct device_attribute *attr, +		  char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + +	return snprintf(buf, PAGE_SIZE, "0x%llx\n", +			wwn_to_u64(phba->cfg_oas_vpt_wwpn)); +} + +/** + * lpfc_oas_vpt_store - Store wwpn of vport whose targets may be enabled + *		      for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. 
+ * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_vpt_store(struct device *dev, struct device_attribute *attr, +		   const char *buf, size_t count) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; +	unsigned int cnt = count; +	uint8_t wwpn[WWN_SZ]; +	int rc; + +	if (!phba->cfg_fof) +		return -EPERM; + +	/* count may include a LF at end of string */ +	if (buf[cnt-1] == '\n') +		cnt--; + +	rc = lpfc_wwn_set(buf, cnt, wwpn); +	if (rc) +		return rc; + +	memcpy(phba->cfg_oas_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); +	memcpy(phba->sli4_hba.oas_next_vpt_wwpn, wwpn, (8 * sizeof(uint8_t))); +	if (wwn_to_u64(wwpn) == 0) +		phba->cfg_oas_flags |= OAS_FIND_ANY_VPORT; +	else +		phba->cfg_oas_flags &= ~OAS_FIND_ANY_VPORT; +	phba->cfg_oas_flags &= ~OAS_LUN_VALID; +	phba->sli4_hba.oas_next_lun = FIND_FIRST_OAS_LUN; +	return count; +} +static DEVICE_ATTR(lpfc_xlane_vpt, S_IRUGO | S_IWUSR, +		   lpfc_oas_vpt_show, lpfc_oas_vpt_store); + +/** + * lpfc_oas_lun_state_show - Return the current state (enabled or disabled) + *			    of whether luns will be enabled or disabled + *			    for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. + **/ +static ssize_t +lpfc_oas_lun_state_show(struct device *dev, struct device_attribute *attr, +			char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + +	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_state); +} + +/** + * lpfc_oas_lun_state_store - Store the state (enabled or disabled) + *			    of whether luns will be enabled or disabled + *			    for Optimized Access Storage (OAS) operations. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * @count: Size of the data buffer. + * + * Returns: + * -EINVAL count is invalid, invalid wwpn byte invalid + * -EPERM oas is not supported by hba + * value of count on success + **/ +static ssize_t +lpfc_oas_lun_state_store(struct device *dev, struct device_attribute *attr, +			 const char *buf, size_t count) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; +	int val = 0; + +	if (!phba->cfg_fof) +		return -EPERM; + +	if (!isdigit(buf[0])) +		return -EINVAL; + +	if (sscanf(buf, "%i", &val) != 1) +		return -EINVAL; + +	if ((val != 0) && (val != 1)) +		return -EINVAL; + +	phba->cfg_oas_lun_state = val; + +	return strlen(buf); +} +static DEVICE_ATTR(lpfc_xlane_lun_state, S_IRUGO | S_IWUSR, +		   lpfc_oas_lun_state_show, lpfc_oas_lun_state_store); + +/** + * lpfc_oas_lun_status_show - Return the status of the Optimized Access + *                          Storage (OAS) lun returned by the + *                          lpfc_oas_lun_show function. + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * Returns: + * size of formatted string. 
+ **/ +static ssize_t +lpfc_oas_lun_status_show(struct device *dev, struct device_attribute *attr, +			 char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + +	if (!(phba->cfg_oas_flags & OAS_LUN_VALID)) +		return -EFAULT; + +	return snprintf(buf, PAGE_SIZE, "%d\n", phba->cfg_oas_lun_status); +} +static DEVICE_ATTR(lpfc_xlane_lun_status, S_IRUGO, +		   lpfc_oas_lun_status_show, NULL); + + +/** + * lpfc_oas_lun_state_set - enable or disable a lun for Optimized Access Storage + *			   (OAS) operations. + * @phba: lpfc_hba pointer. + * @ndlp: pointer to fcp target node. + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the lun. + * + * Returns: + * SUCCESS : 0 + * -EPERM OAS is not enabled or not supported by this port. + * + */ +static size_t +lpfc_oas_lun_state_set(struct lpfc_hba *phba, uint8_t vpt_wwpn[], +		       uint8_t tgt_wwpn[], uint64_t lun, uint32_t oas_state) +{ + +	int rc = 0; + +	if (!phba->cfg_fof) +		return -EPERM; + +	if (oas_state) { +		if (!lpfc_enable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, +					 (struct lpfc_name *)tgt_wwpn, lun)) +			rc = -ENOMEM; +	} else { +		lpfc_disable_oas_lun(phba, (struct lpfc_name *)vpt_wwpn, +				     (struct lpfc_name *)tgt_wwpn, lun); +	} +	return rc; + +} + +/** + * lpfc_oas_lun_get_next - get the next lun that has been enabled for Optimized + *			  Access Storage (OAS) operations. + * @phba: lpfc_hba pointer. + * @vpt_wwpn: wwpn of the vport associated with the returned lun + * @tgt_wwpn: wwpn of the target associated with the returned lun + * @lun_status: status of the lun returned lun + * + * Returns the first or next lun enabled for OAS operations for the vport/target + * specified.  If a lun is found, its vport wwpn, target wwpn and status is + * returned.  If the lun is not found, NOT_OAS_ENABLED_LUN is returned. + * + * Return: + * lun that is OAS enabled for the vport/target + * NOT_OAS_ENABLED_LUN when no oas enabled lun found. + */ +static uint64_t +lpfc_oas_lun_get_next(struct lpfc_hba *phba, uint8_t vpt_wwpn[], +		      uint8_t tgt_wwpn[], uint32_t *lun_status) +{ +	uint64_t found_lun; + +	if (unlikely(!phba) || !vpt_wwpn || !tgt_wwpn) +		return NOT_OAS_ENABLED_LUN; +	if (lpfc_find_next_oas_lun(phba, (struct lpfc_name *) +				   phba->sli4_hba.oas_next_vpt_wwpn, +				   (struct lpfc_name *) +				   phba->sli4_hba.oas_next_tgt_wwpn, +				   &phba->sli4_hba.oas_next_lun, +				   (struct lpfc_name *)vpt_wwpn, +				   (struct lpfc_name *)tgt_wwpn, +				   &found_lun, lun_status)) +		return found_lun; +	else +		return NOT_OAS_ENABLED_LUN; +} + +/** + * lpfc_oas_lun_state_change - enable/disable a lun for OAS operations + * @phba: lpfc_hba pointer. + * @vpt_wwpn: vport wwpn by reference. + * @tgt_wwpn: target wwpn by reference. + * @lun: the fc lun for setting oas state. + * @oas_state: the oas state to be set to the oas_lun. + * + * This routine enables (OAS_LUN_ENABLE) or disables (OAS_LUN_DISABLE) + * a lun for OAS operations. 
+ * + * Return: + * SUCCESS: 0 + * -ENOMEM: failed to enable a lun for OAS operations + * -EPERM: OAS is not enabled + */ +static ssize_t +lpfc_oas_lun_state_change(struct lpfc_hba *phba, uint8_t vpt_wwpn[], +			  uint8_t tgt_wwpn[], uint64_t lun, +			  uint32_t oas_state) +{ + +	int rc; + +	rc = lpfc_oas_lun_state_set(phba, vpt_wwpn, tgt_wwpn, lun, +					oas_state); +	return rc; +} + +/** + * lpfc_oas_lun_show - Return oas enabled luns from a chosen target + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * This routine returns a lun enabled for OAS each time the function + * is called. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. + **/ +static ssize_t +lpfc_oas_lun_show(struct device *dev, struct device_attribute *attr, +		  char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + +	uint64_t oas_lun; +	int len = 0; + +	if (!phba->cfg_fof) +		return -EPERM; + +	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) +		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)) +			return -EFAULT; + +	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) +		if (!(phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)) +			return -EFAULT; + +	oas_lun = lpfc_oas_lun_get_next(phba, phba->cfg_oas_vpt_wwpn, +					phba->cfg_oas_tgt_wwpn, +					&phba->cfg_oas_lun_status); +	if (oas_lun != NOT_OAS_ENABLED_LUN) +		phba->cfg_oas_flags |= OAS_LUN_VALID; + +	len += snprintf(buf + len, PAGE_SIZE-len, "0x%llx", oas_lun); + +	return len; +} + +/** + * lpfc_oas_lun_store - Sets the OAS state for lun + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: buffer for passing information. + * + * This function sets the OAS state for lun.  Before this function is called, + * the vport wwpn, target wwpn, and oas state need to be set. + * + * Returns: + * SUCCESS: size of formatted string. + * -EFAULT: target or vport wwpn was not set properly. + * -EPERM: oas is not enabled. 
+ **/ +static ssize_t +lpfc_oas_lun_store(struct device *dev, struct device_attribute *attr, +		   const char *buf, size_t count) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; +	uint64_t scsi_lun; +	ssize_t rc; + +	if (!phba->cfg_fof) +		return -EPERM; + +	if (wwn_to_u64(phba->cfg_oas_vpt_wwpn) == 0) +		return -EFAULT; + +	if (wwn_to_u64(phba->cfg_oas_tgt_wwpn) == 0) +		return -EFAULT; + +	if (!isdigit(buf[0])) +		return -EINVAL; + +	if (sscanf(buf, "0x%llx", &scsi_lun) != 1) +		return -EINVAL; + +	lpfc_printf_log(phba, KERN_INFO, LOG_INIT, +			"3372 Try to set vport 0x%llx target 0x%llx lun:%lld " +			"with oas set to %d\n", +			wwn_to_u64(phba->cfg_oas_vpt_wwpn), +			wwn_to_u64(phba->cfg_oas_tgt_wwpn), scsi_lun, +			phba->cfg_oas_lun_state); + +	rc = lpfc_oas_lun_state_change(phba, phba->cfg_oas_vpt_wwpn, +					   phba->cfg_oas_tgt_wwpn, scsi_lun, +					   phba->cfg_oas_lun_state); + +	if (rc) +		return rc; + +	return count; +} +static DEVICE_ATTR(lpfc_xlane_lun, S_IRUGO | S_IWUSR, +		   lpfc_oas_lun_show, lpfc_oas_lun_store);  static int lpfc_poll = 0;  module_param(lpfc_poll, int, S_IRUGO); @@ -3818,7 +4293,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,  	struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;  	struct lpfc_hba   *phba = vport->phba;  	struct lpfc_vector_map_info *cpup; -	int  idx, len = 0; +	int  len = 0;  	if ((phba->sli_rev != LPFC_SLI_REV4) ||  	    (phba->intr_type != MSIX)) @@ -3846,23 +4321,39 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr,  		break;  	} -	cpup = phba->sli4_hba.cpu_map; -	for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { +	while (phba->sli4_hba.curr_disp_cpu < phba->sli4_hba.num_present_cpu) { +		cpup = &phba->sli4_hba.cpu_map[phba->sli4_hba.curr_disp_cpu]; + +		/* margin should fit in this and the truncated message */  		if (cpup->irq == LPFC_VECTOR_MAP_EMPTY)  			len += snprintf(buf + len, PAGE_SIZE-len,  					"CPU %02d io_chan %02d "  					"physid %d coreid %d\n", -					idx, cpup->channel_id, cpup->phys_id, +					phba->sli4_hba.curr_disp_cpu, +					cpup->channel_id, cpup->phys_id,  					cpup->core_id);  		else  			len += snprintf(buf + len, PAGE_SIZE-len,  					"CPU %02d io_chan %02d "  					"physid %d coreid %d IRQ %d\n", -					idx, cpup->channel_id, cpup->phys_id, +					phba->sli4_hba.curr_disp_cpu, +					cpup->channel_id, cpup->phys_id,  					cpup->core_id, cpup->irq); -		cpup++; +		phba->sli4_hba.curr_disp_cpu++; + +		/* display max number of CPUs keeping some margin */ +		if (phba->sli4_hba.curr_disp_cpu < +				phba->sli4_hba.num_present_cpu && +				(len >= (PAGE_SIZE - 64))) { +			len += snprintf(buf + len, PAGE_SIZE-len, "more...\n"); +			break; +		}  	} + +	if (phba->sli4_hba.curr_disp_cpu == phba->sli4_hba.num_present_cpu) +		phba->sli4_hba.curr_disp_cpu = 0; +  	return len;  } @@ -4157,6 +4648,21 @@ LPFC_ATTR_R(enable_hba_reset, 1, 0, 1, "Enable HBA resets from the driver.");  LPFC_ATTR_R(enable_hba_heartbeat, 0, 0, 1, "Enable HBA Heartbeat.");  /* +# lpfc_EnableXLane: Enable Express Lane Feature +#      0x0   Express Lane Feature disabled +#      0x1   Express Lane Feature enabled +# Value range is [0,1]. Default value is 0. +*/ +LPFC_ATTR_R(EnableXLane, 0, 0, 1, "Enable Express Lane Feature."); + +/* +# lpfc_XLanePriority:  Define CS_CTL priority for Express Lane Feature +#       0x0 - 0x7f  = CS_CTL field in FC header (high 7 bits) +# Value range is [0x0,0x7f]. 
Default value is 0 +*/ +LPFC_ATTR_RW(XLanePriority, 0, 0x0, 0x7f, "CS_CTL for Express Lane Feature."); + +/*  # lpfc_enable_bg: Enable BlockGuard (Emulex's Implementation of T10-DIF)  #       0  = BlockGuard disabled (default)  #       1  = BlockGuard enabled @@ -4317,6 +4823,13 @@ struct device_attribute *lpfc_hba_attrs[] = {  	&dev_attr_lpfc_soft_wwn_enable,  	&dev_attr_lpfc_enable_hba_reset,  	&dev_attr_lpfc_enable_hba_heartbeat, +	&dev_attr_lpfc_EnableXLane, +	&dev_attr_lpfc_XLanePriority, +	&dev_attr_lpfc_xlane_lun, +	&dev_attr_lpfc_xlane_tgt, +	&dev_attr_lpfc_xlane_vpt, +	&dev_attr_lpfc_xlane_lun_state, +	&dev_attr_lpfc_xlane_lun_status,  	&dev_attr_lpfc_sg_seg_cnt,  	&dev_attr_lpfc_max_scsicmpl_time,  	&dev_attr_lpfc_stat_data_ctrl, @@ -4335,6 +4848,7 @@ struct device_attribute *lpfc_hba_attrs[] = {  	&dev_attr_lpfc_dss,  	&dev_attr_lpfc_sriov_hw_max_virtfn,  	&dev_attr_protocol, +	&dev_attr_lpfc_xlane_supported,  	NULL,  }; @@ -5296,11 +5810,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)  	lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);  	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);  	lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat); +	lpfc_EnableXLane_init(phba, lpfc_EnableXLane); +	if (phba->sli_rev != LPFC_SLI_REV4) +		phba->cfg_EnableXLane = 0; +	lpfc_XLanePriority_init(phba, lpfc_XLanePriority); +	memset(phba->cfg_oas_tgt_wwpn, 0, (8 * sizeof(uint8_t))); +	memset(phba->cfg_oas_vpt_wwpn, 0, (8 * sizeof(uint8_t))); +	phba->cfg_oas_lun_state = 0; +	phba->cfg_oas_lun_status = 0; +	phba->cfg_oas_flags = 0;  	lpfc_enable_bg_init(phba, lpfc_enable_bg);  	if (phba->sli_rev == LPFC_SLI_REV4)  		phba->cfg_poll = 0;  	else -	phba->cfg_poll = lpfc_poll; +		phba->cfg_poll = lpfc_poll;  	phba->cfg_soft_wwnn = 0L;  	phba->cfg_soft_wwpn = 0L;  	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index b92aec989d6..5b5c825d957 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2009-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2009-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        
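The lpfc_fcp_cpu_map_show() rework above is a textbook sysfs pagination pattern: a one-shot dump of every CPU cannot be guaranteed to fit in PAGE_SIZE, so a cursor (curr_disp_cpu) persists across reads, a margin is kept for a "more..." marker, and the cursor wraps to zero once the full map has been shown. Generic shape of the pattern (illustrative; demo_hba, dev_to_demo, curr, and nr_items are hypothetical names):

	static ssize_t demo_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		struct demo_hba *hba = dev_to_demo(dev);	/* hypothetical */
		int len = 0;

		while (hba->curr < hba->nr_items) {
			len += snprintf(buf + len, PAGE_SIZE - len,
					"item %d\n", hba->curr);
			hba->curr++;
			/* stop early, keeping room for the marker */
			if (hba->curr < hba->nr_items &&
			    len >= PAGE_SIZE - 64) {
				len += snprintf(buf + len, PAGE_SIZE - len,
						"more...\n");
				break;
			}
		}
		if (hba->curr == hba->nr_items)
			hba->curr = 0;		/* next read starts over */
		return len;
	}

One consequence of the stateful cursor is that concurrent readers interleave; for a diagnostic attribute like this one, that trade-off is usually acceptable.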
*   * www.emulex.com                                                  *   *                                                                 * @@ -2629,7 +2629,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,  				rspiocbq,  				(phba->fc_ratov * 2)  				+ LPFC_DRVR_TIMEOUT); -	if (iocb_stat) { +	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {  		ret_val = -EIO;  		goto err_get_xri_exit;  	} @@ -3204,8 +3204,9 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)  					     rspiocbq, (phba->fc_ratov * 2) +  					     LPFC_DRVR_TIMEOUT); -	if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) && -					   (rsp->ulpStatus != IOCB_SUCCESS))) { +	if ((iocb_stat != IOCB_SUCCESS) || +	    ((phba->sli_rev < LPFC_SLI_REV4) && +	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {  		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,  				"3126 Failed loopback test issue iocb: "  				"iocb_stat:x%x\n", iocb_stat); @@ -4152,6 +4153,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,  		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {  			switch (opcode) {  			case FCOE_OPCODE_READ_FCF: +			case FCOE_OPCODE_GET_DPORT_RESULTS:  				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,  						"2957 Handled SLI_CONFIG "  						"subsys_fcoe, opcode:x%x\n", @@ -4160,6 +4162,8 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,  							nemb_mse, dmabuf);  				break;  			case FCOE_OPCODE_ADD_FCF: +			case FCOE_OPCODE_SET_DPORT_MODE: +			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:  				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,  						"2958 Handled SLI_CONFIG "  						"subsys_fcoe, opcode:x%x\n", diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index 67f7d0a160d..928ef609f36 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2010-2012 Emulex.  All rights reserved.                * + * Copyright (C) 2010-2014 Emulex.  All rights reserved.                *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   *                                                                 * @@ -231,6 +231,8 @@ struct lpfc_sli_config_emb0_subsys {  #define SLI_CONFIG_SUBSYS_FCOE		0x0C  #define FCOE_OPCODE_READ_FCF		0x08  #define FCOE_OPCODE_ADD_FCF		0x09 +#define FCOE_OPCODE_SET_DPORT_MODE	0x27 +#define FCOE_OPCODE_GET_DPORT_RESULTS	0x28  };  struct lpfc_sli_config_emb1_subsys { diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index cda076a8423..db5604f01a1 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        
*   * www.emulex.com                                                  *   *                                                                 * @@ -187,6 +187,11 @@ void lpfc_offline_prep(struct lpfc_hba *, int);  void lpfc_offline(struct lpfc_hba *);  void lpfc_reset_hba(struct lpfc_hba *); +int lpfc_fof_queue_create(struct lpfc_hba *); +int lpfc_fof_queue_setup(struct lpfc_hba *); +int lpfc_fof_queue_destroy(struct lpfc_hba *); +irqreturn_t lpfc_sli4_fof_intr_handler(int, void *); +  int lpfc_sli_setup(struct lpfc_hba *);  int lpfc_sli_queue_setup(struct lpfc_hba *); @@ -242,6 +247,7 @@ int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);  void lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *);  int lpfc_mem_alloc(struct lpfc_hba *, int align); +int lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *);  void lpfc_mem_free(struct lpfc_hba *);  void lpfc_mem_free_all(struct lpfc_hba *);  void lpfc_stop_vport_timers(struct lpfc_vport *); @@ -283,6 +289,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,  void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);  void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);  void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); +void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba);  void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);  void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);  int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, @@ -304,6 +311,9 @@ int lpfc_sli_issue_abort_iotag(struct lpfc_hba *, struct lpfc_sli_ring *,  int lpfc_sli_sum_iocb(struct lpfc_vport *, uint16_t, uint64_t, lpfc_ctx_cmd);  int lpfc_sli_abort_iocb(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t,  			uint64_t, lpfc_ctx_cmd); +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, +			uint16_t, uint64_t, lpfc_ctx_cmd);  void lpfc_mbox_timeout(unsigned long);  void lpfc_mbox_timeout_handler(struct lpfc_hba *); @@ -399,7 +409,6 @@ void lpfc_fabric_block_timeout(unsigned long);  void lpfc_unblock_fabric_iocbs(struct lpfc_hba *);  void lpfc_rampdown_queue_depth(struct lpfc_hba *);  void lpfc_ramp_down_queue_handler(struct lpfc_hba *); -void lpfc_ramp_up_queue_handler(struct lpfc_hba *);  void lpfc_scsi_dev_block(struct lpfc_hba *);  void @@ -471,3 +480,20 @@ void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);  uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);  int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);  void lpfc_sli4_offline_eratt(struct lpfc_hba *); + +struct lpfc_device_data *lpfc_create_device_data(struct lpfc_hba *, +						struct lpfc_name *, +						struct lpfc_name *, +						uint64_t, bool); +void lpfc_delete_device_data(struct lpfc_hba *, struct lpfc_device_data*); +struct lpfc_device_data *__lpfc_get_device_data(struct lpfc_hba *, +					struct list_head *list, +					struct lpfc_name *, +					struct lpfc_name *, uint64_t); +bool lpfc_enable_oas_lun(struct lpfc_hba *, struct lpfc_name *, +			 struct lpfc_name *, uint64_t); +bool lpfc_disable_oas_lun(struct lpfc_hba *, struct lpfc_name *, +			  struct lpfc_name *, uint64_t); +bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *, +			    struct lpfc_name *, uint64_t *, struct lpfc_name *, +			    struct lpfc_name *, uint64_t *, uint32_t *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 02e8cd923d0..da61d8dc044 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -280,7 +280,7 @@ lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb) 
 		buf_ptr = (struct lpfc_dmabuf *) ctiocb->context3;  		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);  		kfree(buf_ptr); -		ctiocb->context1 = NULL; +		ctiocb->context3 = NULL;  	}  	lpfc_sli_release_iocbq(phba, ctiocb);  	return 0; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 60084e6ad2f..b0aedce3f54 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2007-2012 Emulex.  All rights reserved.           * + * Copyright (C) 2007-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   *                                                                 * @@ -2280,6 +2280,104 @@ proc_cq:  		}  	} +	if (phba->cfg_fof) { +		/* FOF EQ */ +		qp = phba->sli4_hba.fof_eq; +		if (!qp) +			goto out; + +		len += snprintf(pbuffer+len, +			LPFC_QUE_INFO_GET_BUF_SIZE-len, +			"\nFOF EQ info: " +			"EQ-STAT[max:x%x noE:x%x " +			"bs:x%x proc:x%llx]\n", +			qp->q_cnt_1, qp->q_cnt_2, +			qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); + +		len += snprintf(pbuffer+len, +			LPFC_QUE_INFO_GET_BUF_SIZE-len, +			"EQID[%02d], " +			"QE-CNT[%04d], QE-SIZE[%04d], " +			"HOST-IDX[%04d], PORT-IDX[%04d]", +			qp->queue_id, +			qp->entry_count, +			qp->entry_size, +			qp->host_index, +			qp->hba_index); + +		/* Reset max counter */ +		qp->EQ_max_eqe = 0; + +		len +=  snprintf(pbuffer+len, +			LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); +		if (len >= max_cnt) +			goto too_big; +	} + +	if (phba->cfg_fof) { + +		/* OAS CQ */ +		qp = phba->sli4_hba.oas_cq; +		if (qp) { +			len += snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, +				"\tOAS CQ info: "); +			len += snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, +				"AssocEQID[%02d]: " +				"CQ STAT[max:x%x relw:x%x " +				"xabt:x%x wq:x%llx]\n", +				qp->assoc_qid, +				qp->q_cnt_1, qp->q_cnt_2, +				qp->q_cnt_3, (unsigned long long)qp->q_cnt_4); +			len += snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, +				"\tCQID[%02d], " +				"QE-CNT[%04d], QE-SIZE[%04d], " +				"HOST-IDX[%04d], PORT-IDX[%04d]", +				qp->queue_id, qp->entry_count, +				qp->entry_size, qp->host_index, +				qp->hba_index); + +			/* Reset max counter */ +			qp->CQ_max_cqe = 0; + +			len +=  snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); +			if (len >= max_cnt) +				goto too_big; +		} + +		/* OAS WQ */ +		qp = phba->sli4_hba.oas_wq; +		if (qp) { +			len += snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, +				"\t\tOAS WQ info: "); +			len += snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, +				"AssocCQID[%02d]: " +				"WQ-STAT[oflow:x%x posted:x%llx]\n", +				qp->assoc_qid, +				qp->q_cnt_1, (unsigned long long)qp->q_cnt_4); +			len += snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, +				"\t\tWQID[%02d], " +				"QE-CNT[%04d], QE-SIZE[%04d], " +				"HOST-IDX[%04d], PORT-IDX[%04d]", +				qp->queue_id, +				qp->entry_count, +				qp->entry_size, +				qp->host_index, +				qp->hba_index); + +			len +=  snprintf(pbuffer+len, +				LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n"); +			if (len >= max_cnt) +				goto too_big; +		} +	} +out:  	spin_unlock_irq(&phba->hbalock);  	return 
simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len); @@ -3927,6 +4025,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)  	struct lpfc_hba   *phba = vport->phba;  	char name[64];  	uint32_t num, i; +	bool pport_setup = false;  	if (!lpfc_debugfs_enable)  		return; @@ -3947,6 +4046,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)  	/* Setup funcX directory for specific HBA PCI function */  	snprintf(name, sizeof(name), "fn%d", phba->brd_no);  	if (!phba->hba_debugfs_root) { +		pport_setup = true;  		phba->hba_debugfs_root =  			debugfs_create_dir(name, lpfc_debugfs_root);  		if (!phba->hba_debugfs_root) { @@ -4001,7 +4101,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)  				goto debug_failed;  			}  		} else -			phba->debug_dumpHBASlim = NULL; +			phba->debug_dumpHostSlim = NULL;  		/* Setup dumpData */  		snprintf(name, sizeof(name), "dumpData"); @@ -4239,6 +4339,14 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)  	}  	/* +	 * The following section is for additional directories/files for the +	 * physical port. +	 */ + +	if (!pport_setup) +		goto debug_failed; + +	/*  	 * iDiag debugfs root entry points for SLI4 device only  	 */  	if (phba->sli_rev < LPFC_SLI_REV4) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index e409ba5f728..1a6fe524940 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -116,7 +116,7 @@ struct lpfc_nodelist {  	atomic_t cmd_pending;  	uint32_t cmd_qdepth;  	unsigned long last_change_time; -	struct lpfc_node_rrqs active_rrqs; +	unsigned long *active_rrqs_xri_bitmap;  	struct lpfc_scsicmd_bkt *lat_data;	/* Latency data */  };  struct lpfc_node_rrq { diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 110445f0c58..7a5d81a65be 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -1516,7 +1516,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,  	uint32_t rc, keepDID = 0;  	int  put_node;  	int  put_rport; -	struct lpfc_node_rrqs rrq; +	unsigned long *active_rrqs_xri_bitmap = NULL;  	/* Fabric nodes can have the same WWPN so we don't bother searching  	 * by WWPN.  Just return the ndlp that was given to us. 
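The hunk above drops the fixed-size struct lpfc_node_rrqs bitmap embedded in each nodelist entry in favor of a bare pointer, so the per-node active-RRQ bitmap can be sized at runtime from the port's real XRI count and drawn from phba->active_rrq_pool. A minimal sketch of the sizing rule, mirroring the arithmetic in lpfc_mem_alloc_active_rrq_pool_s4 in the lpfc_mem.c hunk further down (the helper name here is hypothetical, not driver code):

#include <linux/bitops.h>	/* BITS_PER_LONG */
#include <linux/mempool.h>

/* Hypothetical helper: bytes for a one-bit-per-XRI bitmap, rounded up
 * to whole longs so the buffer can back the standard bitmap helpers. */
static size_t rrq_xri_bitmap_bytes(int max_xri)
{
	return ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
	       sizeof(unsigned long);
}

Each node then draws one zeroed buffer of that size from the kmalloc-backed mempool, as lpfc_nlp_init() does in the lpfc_hbadisc.c hunks below, and returns it to the pool on the final kref release.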
@@ -1534,7 +1534,13 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,  	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))  		return ndlp; -	memset(&rrq.xri_bitmap, 0, sizeof(new_ndlp->active_rrqs.xri_bitmap)); +	if (phba->sli_rev == LPFC_SLI_REV4) { +		active_rrqs_xri_bitmap = mempool_alloc(phba->active_rrq_pool, +						       GFP_KERNEL); +		if (active_rrqs_xri_bitmap) +			memset(active_rrqs_xri_bitmap, 0, +			       phba->cfg_rrq_xri_bitmap_sz); +	}  	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,  		 "3178 PLOGI confirm: ndlp %p x%x: new_ndlp %p\n", @@ -1543,41 +1549,58 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,  	if (!new_ndlp) {  		rc = memcmp(&ndlp->nlp_portname, name,  			    sizeof(struct lpfc_name)); -		if (!rc) +		if (!rc) { +			if (active_rrqs_xri_bitmap) +				mempool_free(active_rrqs_xri_bitmap, +					     phba->active_rrq_pool);  			return ndlp; +		}  		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC); -		if (!new_ndlp) +		if (!new_ndlp) { +			if (active_rrqs_xri_bitmap) +				mempool_free(active_rrqs_xri_bitmap, +					     phba->active_rrq_pool);  			return ndlp; +		}  		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);  	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {  		rc = memcmp(&ndlp->nlp_portname, name,  			    sizeof(struct lpfc_name)); -		if (!rc) +		if (!rc) { +			if (active_rrqs_xri_bitmap) +				mempool_free(active_rrqs_xri_bitmap, +					     phba->active_rrq_pool);  			return ndlp; +		}  		new_ndlp = lpfc_enable_node(vport, new_ndlp,  						NLP_STE_UNUSED_NODE); -		if (!new_ndlp) +		if (!new_ndlp) { +			if (active_rrqs_xri_bitmap) +				mempool_free(active_rrqs_xri_bitmap, +					     phba->active_rrq_pool);  			return ndlp; +		}  		keepDID = new_ndlp->nlp_DID; -		if (phba->sli_rev == LPFC_SLI_REV4) -			memcpy(&rrq.xri_bitmap, -				&new_ndlp->active_rrqs.xri_bitmap, -				sizeof(new_ndlp->active_rrqs.xri_bitmap)); +		if ((phba->sli_rev == LPFC_SLI_REV4) && active_rrqs_xri_bitmap) +			memcpy(active_rrqs_xri_bitmap, +			       new_ndlp->active_rrqs_xri_bitmap, +			       phba->cfg_rrq_xri_bitmap_sz);  	} else {  		keepDID = new_ndlp->nlp_DID; -		if (phba->sli_rev == LPFC_SLI_REV4) -			memcpy(&rrq.xri_bitmap, -				&new_ndlp->active_rrqs.xri_bitmap, -				sizeof(new_ndlp->active_rrqs.xri_bitmap)); +		if (phba->sli_rev == LPFC_SLI_REV4 && +		    active_rrqs_xri_bitmap) +			memcpy(active_rrqs_xri_bitmap, +			       new_ndlp->active_rrqs_xri_bitmap, +			       phba->cfg_rrq_xri_bitmap_sz);  	}  	lpfc_unreg_rpi(vport, new_ndlp);  	new_ndlp->nlp_DID = ndlp->nlp_DID;  	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;  	if (phba->sli_rev == LPFC_SLI_REV4) -		memcpy(new_ndlp->active_rrqs.xri_bitmap, -			&ndlp->active_rrqs.xri_bitmap, -			sizeof(ndlp->active_rrqs.xri_bitmap)); +		memcpy(new_ndlp->active_rrqs_xri_bitmap, +		       ndlp->active_rrqs_xri_bitmap, +		       phba->cfg_rrq_xri_bitmap_sz);  	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)  		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC; @@ -1619,10 +1642,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,  		/* Two ndlps cannot have the same did on the nodelist */  		ndlp->nlp_DID = keepDID; -		if (phba->sli_rev == LPFC_SLI_REV4) -			memcpy(&ndlp->active_rrqs.xri_bitmap, -				&rrq.xri_bitmap, -				sizeof(ndlp->active_rrqs.xri_bitmap)); +		if (phba->sli_rev == LPFC_SLI_REV4 && +		    active_rrqs_xri_bitmap) +			memcpy(ndlp->active_rrqs_xri_bitmap, +			       active_rrqs_xri_bitmap, +			       phba->cfg_rrq_xri_bitmap_sz);  		lpfc_drop_node(vport, ndlp);  	}  	else { @@ -1634,10 
+1658,11 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,  		/* Two ndlps cannot have the same did */  		ndlp->nlp_DID = keepDID; -		if (phba->sli_rev == LPFC_SLI_REV4) -			memcpy(&ndlp->active_rrqs.xri_bitmap, -				&rrq.xri_bitmap, -				sizeof(ndlp->active_rrqs.xri_bitmap)); +		if (phba->sli_rev == LPFC_SLI_REV4 && +		    active_rrqs_xri_bitmap) +			memcpy(ndlp->active_rrqs_xri_bitmap, +			       active_rrqs_xri_bitmap, +			       phba->cfg_rrq_xri_bitmap_sz);  		/* Since we are swapping the ndlp passed in with the new one  		 * and the did has already been swapped, copy over state. @@ -1668,6 +1693,10 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,  				put_device(&rport->dev);  		}  	} +	if (phba->sli_rev == LPFC_SLI_REV4 && +	    active_rrqs_xri_bitmap) +		mempool_free(active_rrqs_xri_bitmap, +			     phba->active_rrq_pool);  	return new_ndlp;  } @@ -2772,6 +2801,7 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)  	/* This will cause the callback-function lpfc_cmpl_els_cmd to  	 * trigger the release of node.  	 */ +  	lpfc_nlp_put(ndlp);  	return 0;  } @@ -6193,11 +6223,11 @@ lpfc_els_timeout(unsigned long ptr)  	spin_lock_irqsave(&vport->work_port_lock, iflag);  	tmo_posted = vport->work_port_events & WORKER_ELS_TMO; -	if (!tmo_posted) +	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))  		vport->work_port_events |= WORKER_ELS_TMO;  	spin_unlock_irqrestore(&vport->work_port_lock, iflag); -	if (!tmo_posted) +	if ((!tmo_posted) && (!(vport->load_flag & FC_UNLOADING)))  		lpfc_worker_wake_up(phba);  	return;  } @@ -6223,19 +6253,26 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)  	uint32_t els_command = 0;  	uint32_t timeout;  	uint32_t remote_ID = 0xffffffff; -	LIST_HEAD(txcmplq_completions);  	LIST_HEAD(abort_list);  	timeout = (uint32_t)(phba->fc_ratov << 1);  	pring = &phba->sli.ring[LPFC_ELS_RING]; - +	if ((phba->pport->load_flag & FC_UNLOADING)) +		return;  	spin_lock_irq(&phba->hbalock); -	list_splice_init(&pring->txcmplq, &txcmplq_completions); -	spin_unlock_irq(&phba->hbalock); +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_lock(&pring->ring_lock); + +	if ((phba->pport->load_flag & FC_UNLOADING)) { +		if (phba->sli_rev == LPFC_SLI_REV4) +			spin_unlock(&pring->ring_lock); +		spin_unlock_irq(&phba->hbalock); +		return; +	} -	list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) { +	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {  		cmd = &piocb->iocb;  		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 || @@ -6274,11 +6311,12 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)  		}  		list_add_tail(&piocb->dlist, &abort_list);  	} -	spin_lock_irq(&phba->hbalock); -	list_splice(&txcmplq_completions, &pring->txcmplq); +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_unlock(&pring->ring_lock);  	spin_unlock_irq(&phba->hbalock);  	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { +		cmd = &piocb->iocb;  		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,  			 "0127 ELS timeout Data: x%x x%x x%x "  			 "x%x\n", els_command, @@ -6290,8 +6328,9 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)  	}  	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) -		mod_timer(&vport->els_tmofunc, -			  jiffies + msecs_to_jiffies(1000 * timeout)); +		if (!(phba->pport->load_flag & FC_UNLOADING)) +			mod_timer(&vport->els_tmofunc, +				  jiffies + msecs_to_jiffies(1000 * timeout));  }  /** @@ -6317,15 +6356,50 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)  void  
lpfc_els_flush_cmd(struct lpfc_vport *vport)  { -	LIST_HEAD(completions); +	LIST_HEAD(abort_list);  	struct lpfc_hba  *phba = vport->phba;  	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];  	struct lpfc_iocbq *tmp_iocb, *piocb;  	IOCB_t *cmd = NULL;  	lpfc_fabric_abort_vport(vport); +	/* +	 * For SLI3, only the hbalock is required.  But SLI4 needs to coordinate +	 * with the ring insert operation.  Because lpfc_sli_issue_abort_iotag +	 * ultimately grabs the ring_lock, the driver must splice the list into +	 * a working list and release the locks before calling the abort. +	 */ +	spin_lock_irq(&phba->hbalock); +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_lock(&pring->ring_lock); + +	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { +		if (piocb->iocb_flag & LPFC_IO_LIBDFC) +			continue; + +		if (piocb->vport != vport) +			continue; +		list_add_tail(&piocb->dlist, &abort_list); +	} +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_unlock(&pring->ring_lock); +	spin_unlock_irq(&phba->hbalock); +	/* Abort each iocb on the aborted list and remove the dlist links. */ +	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { +		spin_lock_irq(&phba->hbalock); +		list_del_init(&piocb->dlist); +		lpfc_sli_issue_abort_iotag(phba, pring, piocb); +		spin_unlock_irq(&phba->hbalock); +	} +	if (!list_empty(&abort_list)) +		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, +				 "3387 abort list for txq not empty\n"); +	INIT_LIST_HEAD(&abort_list);  	spin_lock_irq(&phba->hbalock); +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_lock(&pring->ring_lock); +  	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {  		cmd = &piocb->iocb; @@ -6343,24 +6417,16 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)  		if (piocb->vport != vport)  			continue; -		list_move_tail(&piocb->list, &completions); -	} - -	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { -		if (piocb->iocb_flag & LPFC_IO_LIBDFC) { -			continue; -		} - -		if (piocb->vport != vport) -			continue; - -		lpfc_sli_issue_abort_iotag(phba, pring, piocb); +		list_del_init(&piocb->list); +		list_add_tail(&piocb->list, &abort_list);  	} +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_unlock(&pring->ring_lock);  	spin_unlock_irq(&phba->hbalock);  	/* Cancel all the IOCBs from the completions list */ -	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, -			      IOERR_SLI_ABORTED); +	lpfc_sli_cancel_iocbs(phba, &abort_list, +			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);  	return;  } @@ -6385,35 +6451,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)  void  lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)  { -	LIST_HEAD(completions); -	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; -	struct lpfc_iocbq *tmp_iocb, *piocb; -	IOCB_t *cmd = NULL; - -	lpfc_fabric_abort_hba(phba); -	spin_lock_irq(&phba->hbalock); -	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { -		cmd = &piocb->iocb; -		if (piocb->iocb_flag & LPFC_IO_LIBDFC) -			continue; -		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */ -		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN || -		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN || -		    cmd->ulpCommand == CMD_CLOSE_XRI_CN || -		    cmd->ulpCommand == CMD_ABORT_XRI_CN) -			continue; -		list_move_tail(&piocb->list, &completions); -	} -	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { -		if (piocb->iocb_flag & LPFC_IO_LIBDFC) -			continue; -		lpfc_sli_issue_abort_iotag(phba, pring, piocb); -	} -	spin_unlock_irq(&phba->hbalock); - -	/* Cancel
all the IOCBs from the completions list */ -	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, -			      IOERR_SLI_ABORTED); +	struct lpfc_vport *vport; +	list_for_each_entry(vport, &phba->port_list, listentry) +		lpfc_els_flush_cmd(vport);  	return;  } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 7801601aa5d..2a17e31265b 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -674,8 +674,6 @@ lpfc_work_done(struct lpfc_hba *phba)  				lpfc_fdmi_timeout_handler(vport);  			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)  				lpfc_ramp_down_queue_handler(phba); -			if (work_port_events & WORKER_RAMP_UP_QUEUE) -				lpfc_ramp_up_queue_handler(phba);  			if (work_port_events & WORKER_DELAYED_DISC_TMO)  				lpfc_delayed_disc_timeout_handler(vport);  		} @@ -733,7 +731,7 @@ lpfc_do_work(void *p)  	struct lpfc_hba *phba = p;  	int rc; -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  	current->flags |= PF_NOFREEZE;  	phba->data_flags = 0; @@ -2545,8 +2543,11 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)  	if (!new_fcf_record) {  		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,  				"2766 Mailbox command READ_FCF_RECORD " -				"failed to retrieve a FCF record.\n"); -		goto error_out; +				"failed to retrieve a FCF record. 
" +				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag, +				phba->fcf.fcf_flag); +		lpfc_unregister_fcf_rescan(phba); +		goto out;  	}  	/* Get the needed parameters from FCF record */ @@ -3973,7 +3974,10 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)  		vport->fc_map_cnt += count;  		break;  	case NLP_STE_NPR_NODE: -		vport->fc_npr_cnt += count; +		if (vport->fc_npr_cnt == 0 && count == -1) +			vport->fc_npr_cnt = 0; +		else +			vport->fc_npr_cnt += count;  		break;  	}  	spin_unlock_irq(shost->host_lock); @@ -4171,8 +4175,6 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,  	NLP_INT_NODE_ACT(ndlp);  	atomic_set(&ndlp->cmd_pending, 0);  	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; -	if (vport->phba->sli_rev == LPFC_SLI_REV4) -		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);  }  struct lpfc_nodelist * @@ -4182,6 +4184,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,  	struct lpfc_hba *phba = vport->phba;  	uint32_t did;  	unsigned long flags; +	unsigned long *active_rrqs_xri_bitmap = NULL;  	if (!ndlp)  		return NULL; @@ -4210,13 +4213,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,  	/* Keep the original DID */  	did = ndlp->nlp_DID; +	if (phba->sli_rev == LPFC_SLI_REV4) +		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;  	/* re-initialize ndlp except of ndlp linked list pointer */  	memset((((char *)ndlp) + sizeof (struct list_head)), 0,  		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));  	lpfc_initialize_node(vport, ndlp, did); +	if (phba->sli_rev == LPFC_SLI_REV4) +		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap; +  	spin_unlock_irqrestore(&phba->ndlp_lock, flags); +	if (vport->phba->sli_rev == LPFC_SLI_REV4) +		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); +  	if (state != NLP_STE_UNUSED_NODE)  		lpfc_nlp_set_state(vport, ndlp, state); @@ -4798,9 +4809,10 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)  				 ((uint32_t) ndlp->nlp_rpi & 0xff));  			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,  					 "0929 FIND node DID " -					 "Data: x%p x%x x%x x%x\n", +					 "Data: x%p x%x x%x x%x %p\n",  					 ndlp, ndlp->nlp_DID, -					 ndlp->nlp_flag, data1); +					 ndlp->nlp_flag, data1, +					 ndlp->active_rrqs_xri_bitmap);  			return ndlp;  		}  	} @@ -5617,6 +5629,17 @@ lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,  	lpfc_initialize_node(vport, ndlp, did);  	INIT_LIST_HEAD(&ndlp->nlp_listp); +	if (vport->phba->sli_rev == LPFC_SLI_REV4) { +		ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); +		ndlp->active_rrqs_xri_bitmap = +				mempool_alloc(vport->phba->active_rrq_pool, +					      GFP_KERNEL); +		if (ndlp->active_rrqs_xri_bitmap) +			memset(ndlp->active_rrqs_xri_bitmap, 0, +			       ndlp->phba->cfg_rrq_xri_bitmap_sz); +	} + +  	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,  		"node init:       did:x%x", @@ -5660,6 +5683,9 @@ lpfc_nlp_release(struct kref *kref)  	/* free ndlp memory for final ndlp release */  	if (NLP_CHK_FREE_REQ(ndlp)) {  		kfree(ndlp->lat_data); +		if (phba->sli_rev == LPFC_SLI_REV4) +			mempool_free(ndlp->active_rrqs_xri_bitmap, +				     ndlp->phba->active_rrq_pool);  		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);  	}  } @@ -6166,10 +6192,6 @@ lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,  		memcpy(&conn_entry->conn_rec, &conn_rec[i],  			sizeof(struct lpfc_fcf_conn_rec)); -		conn_entry->conn_rec.vlan_tag = -			conn_entry->conn_rec.vlan_tag; -		conn_entry->conn_rec.flags = -			
conn_entry->conn_rec.flags;  		list_add_tail(&conn_entry->list,  			&phba->fcf_conn_rec_list);  	} diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 6f927d30ca6..23625925237 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   *                                                                 * @@ -45,6 +45,7 @@  #define LPFC_EXTRA_RING          1	/* ring 1 for other protocols */  #define LPFC_ELS_RING            2	/* ring 2 for ELS commands */  #define LPFC_FCP_NEXT_RING       3 +#define LPFC_FCP_OAS_RING        3  #define SLI2_IOCB_CMD_R0_ENTRIES    172	/* SLI-2 FCP command ring entries */  #define SLI2_IOCB_RSP_R0_ENTRIES    134	/* SLI-2 FCP response ring entries */ diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 086c3f28caa..f432ec180cf 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2009-2013 Emulex.  All rights reserved.                * + * Copyright (C) 2009-2014 Emulex.  All rights reserved.                *   * EMULEX and SLI are trademarks of Emulex.                        
*   * www.emulex.com                                                  *   *                                                                 * @@ -2616,6 +2616,9 @@ struct lpfc_sli4_parameters {  #define cfg_phwq_SHIFT				15  #define cfg_phwq_MASK				0x00000001  #define cfg_phwq_WORD				word12 +#define cfg_oas_SHIFT				25 +#define cfg_oas_MASK				0x00000001 +#define cfg_oas_WORD				word12  #define cfg_loopbk_scope_SHIFT			28  #define cfg_loopbk_scope_MASK			0x0000000f  #define cfg_loopbk_scope_WORD			word12 @@ -3322,6 +3325,9 @@ struct wqe_common {  #define wqe_ebde_cnt_SHIFT    0  #define wqe_ebde_cnt_MASK     0x0000000f  #define wqe_ebde_cnt_WORD     word10 +#define wqe_oas_SHIFT         6 +#define wqe_oas_MASK          0x00000001 +#define wqe_oas_WORD          word10  #define wqe_lenloc_SHIFT      7  #define wqe_lenloc_MASK       0x00000003  #define wqe_lenloc_WORD       word10 @@ -3439,7 +3445,8 @@ struct els_request64_wqe {  #define els_req64_hopcnt_SHIFT      24  #define els_req64_hopcnt_MASK       0x000000ff  #define els_req64_hopcnt_WORD       word13 -	uint32_t reserved[2]; +	uint32_t word14; +	uint32_t max_response_payload_len;  };  struct xmit_els_rsp64_wqe { @@ -3554,7 +3561,8 @@ struct gen_req64_wqe {  	uint32_t relative_offset;  	struct wqe_rctl_dfctl wge_ctl; /* word 5 */  	struct wqe_common wqe_com;     /* words 6-11 */ -	uint32_t rsvd_12_15[4]; +	uint32_t rsvd_12_14[3]; +	uint32_t max_response_payload_len;  };  struct create_xri_wqe { @@ -3584,7 +3592,13 @@ struct abort_cmd_wqe {  struct fcp_iwrite64_wqe {  	struct ulp_bde64 bde; -	uint32_t payload_offset_len; +	uint32_t word3; +#define	cmd_buff_len_SHIFT  16 +#define	cmd_buff_len_MASK  0x00000ffff +#define	cmd_buff_len_WORD  word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3  	uint32_t total_xfer_len;  	uint32_t initial_xfer_len;  	struct wqe_common wqe_com;     /* words 6-11 */ @@ -3594,7 +3608,13 @@ struct fcp_iwrite64_wqe {  struct fcp_iread64_wqe {  	struct ulp_bde64 bde; -	uint32_t payload_offset_len;   /* word 3 */ +	uint32_t word3; +#define	cmd_buff_len_SHIFT  16 +#define	cmd_buff_len_MASK  0x00000ffff +#define	cmd_buff_len_WORD  word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3  	uint32_t total_xfer_len;       /* word 4 */  	uint32_t rsrvd5;               /* word 5 */  	struct wqe_common wqe_com;     /* words 6-11 */ @@ -3604,7 +3624,13 @@ struct fcp_iread64_wqe {  struct fcp_icmnd64_wqe {  	struct ulp_bde64 bde;          /* words 0-2 */ -	uint32_t rsrvd3;               /* word 3 */ +	uint32_t word3; +#define	cmd_buff_len_SHIFT  16 +#define	cmd_buff_len_MASK  0x00000ffff +#define	cmd_buff_len_WORD  word3 +#define payload_offset_len_SHIFT 0 +#define payload_offset_len_MASK 0x0000ffff +#define payload_offset_len_WORD word3  	uint32_t rsrvd4;               /* word 4 */  	uint32_t rsrvd5;               /* word 5 */  	struct wqe_common wqe_com;     /* words 6-11 */ diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 647f5bfb3bd..06f9a5b79e6 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  
All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -80,6 +80,7 @@ static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);  static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);  static void lpfc_sli4_disable_intr(struct lpfc_hba *);  static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t); +static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);  static struct scsi_transport_template *lpfc_transport_template = NULL;  static struct scsi_transport_template *lpfc_vport_transport_template = NULL; @@ -819,57 +820,153 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)  }  /** - * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free + * rspiocb which got deferred + *   * @phba: pointer to lpfc HBA data structure.   * - * This routine will do uninitialization after the HBA is reset when bring - * down the SLI Layer. + * This routine will cleanup completed slow path events after HBA is reset + * when bringing down the SLI Layer. + *   *   * Return codes - *   0 - success. - *   Any other value - error. + *   void.   **/ -static int -lpfc_hba_down_post_s3(struct lpfc_hba *phba) +static void +lpfc_sli4_free_sp_events(struct lpfc_hba *phba) +{ +	struct lpfc_iocbq *rspiocbq; +	struct hbq_dmabuf *dmabuf; +	struct lpfc_cq_event *cq_event; + +	spin_lock_irq(&phba->hbalock); +	phba->hba_flag &= ~HBA_SP_QUEUE_EVT; +	spin_unlock_irq(&phba->hbalock); + +	while (!list_empty(&phba->sli4_hba.sp_queue_event)) { +		/* Get the response iocb from the head of work queue */ +		spin_lock_irq(&phba->hbalock); +		list_remove_head(&phba->sli4_hba.sp_queue_event, +				 cq_event, struct lpfc_cq_event, list); +		spin_unlock_irq(&phba->hbalock); + +		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) { +		case CQE_CODE_COMPL_WQE: +			rspiocbq = container_of(cq_event, struct lpfc_iocbq, +						 cq_event); +			lpfc_sli_release_iocbq(phba, rspiocbq); +			break; +		case CQE_CODE_RECEIVE: +		case CQE_CODE_RECEIVE_V1: +			dmabuf = container_of(cq_event, struct hbq_dmabuf, +					      cq_event); +			lpfc_in_buf_free(phba, &dmabuf->dbuf); +		} +	} +} + +/** + * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup posted ELS buffers after the HBA is reset + * when bringing down the SLI Layer. + * + * + * Return codes + *   void. 
+ **/ +static void +lpfc_hba_free_post_buf(struct lpfc_hba *phba)  {  	struct lpfc_sli *psli = &phba->sli;  	struct lpfc_sli_ring *pring;  	struct lpfc_dmabuf *mp, *next_mp; -	LIST_HEAD(completions); -	int i; +	LIST_HEAD(buflist); +	int count;  	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)  		lpfc_sli_hbqbuf_free_all(phba);  	else {  		/* Cleanup preposted buffers on the ELS ring */  		pring = &psli->ring[LPFC_ELS_RING]; -		list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) { +		spin_lock_irq(&phba->hbalock); +		list_splice_init(&pring->postbufq, &buflist); +		spin_unlock_irq(&phba->hbalock); + +		count = 0; +		list_for_each_entry_safe(mp, next_mp, &buflist, list) {  			list_del(&mp->list); -			pring->postbufq_cnt--; +			count++;  			lpfc_mbuf_free(phba, mp->virt, mp->phys);  			kfree(mp);  		} + +		spin_lock_irq(&phba->hbalock); +		pring->postbufq_cnt -= count; +		spin_unlock_irq(&phba->hbalock);  	} +} + +/** + * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will cleanup the txcmplq after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + *   void + **/ +static void +lpfc_hba_clean_txcmplq(struct lpfc_hba *phba) +{ +	struct lpfc_sli *psli = &phba->sli; +	struct lpfc_sli_ring *pring; +	LIST_HEAD(completions); +	int i; -	spin_lock_irq(&phba->hbalock);  	for (i = 0; i < psli->num_rings; i++) {  		pring = &psli->ring[i]; - +		if (phba->sli_rev >= LPFC_SLI_REV4) +			spin_lock_irq(&pring->ring_lock); +		else +			spin_lock_irq(&phba->hbalock);  		/* At this point in time the HBA is either reset or DOA. Either  		 * way, nothing should be on txcmplq as it will NEVER complete.  		 */  		list_splice_init(&pring->txcmplq, &completions); -		spin_unlock_irq(&phba->hbalock); +		pring->txcmplq_cnt = 0; + +		if (phba->sli_rev >= LPFC_SLI_REV4) +			spin_unlock_irq(&pring->ring_lock); +		else +			spin_unlock_irq(&phba->hbalock);  		/* Cancel all the IOCBs from the completions list */  		lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,  				      IOERR_SLI_ABORTED); -  		lpfc_sli_abort_iocb_ring(phba, pring); -		spin_lock_irq(&phba->hbalock);  	} -	spin_unlock_irq(&phba->hbalock); +} +/** + * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset + * @phba: pointer to lpfc HBA data structure. + * + * This routine will do uninitialization after the HBA is reset when bringing + * down the SLI Layer. + * + * Return codes + *   0 - success. + *   Any other value - error. + **/ +static int +lpfc_hba_down_post_s3(struct lpfc_hba *phba) +{ +	lpfc_hba_free_post_buf(phba); +	lpfc_hba_clean_txcmplq(phba);  	return 0;  } @@ -889,13 +986,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)  {  	struct lpfc_scsi_buf *psb, *psb_next;  	LIST_HEAD(aborts); -	int ret;  	unsigned long iflag = 0;  	struct lpfc_sglq *sglq_entry = NULL; -	ret = lpfc_hba_down_post_s3(phba); -	if (ret) -		return ret; +	lpfc_hba_free_post_buf(phba); +	lpfc_hba_clean_txcmplq(phba); +  	/* At this point in time the HBA is either reset or DOA. 
Either  	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be  	 * on the lpfc_sgl_list so that it can either be freed if the @@ -931,6 +1027,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)  	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);  	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);  	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); + +	lpfc_sli4_free_sp_events(phba);  	return 0;  } @@ -1005,9 +1103,14 @@ lpfc_rrq_timeout(unsigned long ptr)  	phba = (struct lpfc_hba *)ptr;  	spin_lock_irqsave(&phba->pport->work_port_lock, iflag); -	phba->hba_flag |= HBA_RRQ_ACTIVE; +	if (!(phba->pport->load_flag & FC_UNLOADING)) +		phba->hba_flag |= HBA_RRQ_ACTIVE; +	else +		phba->hba_flag &= ~HBA_RRQ_ACTIVE;  	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag); -	lpfc_worker_wake_up(phba); + +	if (!(phba->pport->load_flag & FC_UNLOADING)) +		lpfc_worker_wake_up(phba);  }  /** @@ -1244,7 +1347,6 @@ static void  lpfc_handle_deferred_eratt(struct lpfc_hba *phba)  {  	uint32_t old_host_status = phba->work_hs; -	struct lpfc_sli_ring  *pring;  	struct lpfc_sli *psli = &phba->sli;  	/* If the pci channel is offline, ignore possible errors, @@ -1273,8 +1375,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)  	 * dropped by the firmware. Error iocb (I/O) on txcmplq and let the  	 * SCSI layer retry it after re-establishing link.  	 */ -	pring = &psli->ring[psli->fcp_ring]; -	lpfc_sli_abort_iocb_ring(phba, pring); +	lpfc_sli_abort_fcp_rings(phba);  	/*  	 * There was a firmware error. Take the hba offline and then @@ -1342,7 +1443,6 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)  {  	struct lpfc_vport *vport = phba->pport;  	struct lpfc_sli   *psli = &phba->sli; -	struct lpfc_sli_ring  *pring;  	uint32_t event_data;  	unsigned long temperature;  	struct temp_event temp_event_data; @@ -1394,8 +1494,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)  		* Error iocb (I/O) on txcmplq and let the SCSI layer  		* retry it after re-establishing link.  		*/ -		pring = &psli->ring[psli->fcp_ring]; -		lpfc_sli_abort_iocb_ring(phba, pring); +		lpfc_sli_abort_fcp_rings(phba);  		/*  		 * There was a firmware error.  Take the hba offline and then @@ -1468,7 +1567,8 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)   * for handling possible port resource change.   
**/  static int -lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action) +lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, +			    bool en_rn_msg)  {  	int rc;  	uint32_t intr_mode; @@ -1480,9 +1580,10 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)  	rc = lpfc_sli4_pdev_status_reg_wait(phba);  	if (!rc) {  		/* need reset: attempt for port recovery */ -		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -				"2887 Reset Needed: Attempting Port " -				"Recovery...\n"); +		if (en_rn_msg) +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"2887 Reset Needed: Attempting Port " +					"Recovery...\n");  		lpfc_offline_prep(phba, mbx_action);  		lpfc_offline(phba);  		/* release interrupt for possible resource change */ @@ -1522,6 +1623,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)  	uint32_t reg_err1, reg_err2;  	uint32_t uerrlo_reg, uemasklo_reg;  	uint32_t pci_rd_rc1, pci_rd_rc2; +	bool en_rn_msg = true;  	int rc;  	/* If the pci channel is offline, ignore possible errors, since @@ -1572,10 +1674,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)  			break;  		}  		if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && -		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) +		    reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {  			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, -					"3143 Port Down: Firmware Restarted\n"); -		else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && +					"3143 Port Down: Firmware Update " +					"Detected\n"); +			en_rn_msg = false; +		} else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&  			 reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)  			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  					"3144 Port Down: Debug Dump\n"); @@ -1585,7 +1689,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)  					"3145 Port Down: Provisioning\n");  		/* Check port status register for function reset */ -		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT); +		rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, +				en_rn_msg);  		if (rc == 0) {  			/* don't report event on forced debug dump */  			if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 && @@ -1928,78 +2033,81 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  	switch (dev_id) {  	case PCI_DEVICE_ID_FIREFLY: -		m = (typeof(m)){"LP6000", "PCI", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP6000", "PCI", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_SUPERFLY:  		if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3) -			m = (typeof(m)){"LP7000", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP7000", "PCI", ""};  		else -			m = (typeof(m)){"LP7000E", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP7000E", "PCI", ""}; +		m.function = "Obsolete, Unsupported Fibre Channel Adapter";  		break;  	case PCI_DEVICE_ID_DRAGONFLY:  		m = (typeof(m)){"LP8000", "PCI", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_CENTAUR:  		if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID) -			m = (typeof(m)){"LP9002", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP9002", "PCI", ""};  		else -			m = (typeof(m)){"LP9000", "PCI", -					"Fibre Channel Adapter"}; +			m = (typeof(m)){"LP9000", "PCI", ""}; +		m.function = "Obsolete, Unsupported Fibre Channel Adapter";  		break;  	case PCI_DEVICE_ID_RFLY:  		m = (typeof(m)){"LP952", "PCI", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PEGASUS:  		m 
= (typeof(m)){"LP9802", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_THOR:  		m = (typeof(m)){"LP10000", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_VIPER:  		m = (typeof(m)){"LPX1000",  "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PFLY:  		m = (typeof(m)){"LP982", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_TFLY:  		m = (typeof(m)){"LP1050", "PCI-X", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HELIOS:  		m = (typeof(m)){"LP11000", "PCI-X2", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HELIOS_SCSP:  		m = (typeof(m)){"LP11000-SP", "PCI-X2", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HELIOS_DCSP:  		m = (typeof(m)){"LP11002-SP",  "PCI-X2", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_NEPTUNE: -		m = (typeof(m)){"LPe1000", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe1000", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_NEPTUNE_SCSP: -		m = (typeof(m)){"LPe1000-SP", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe1000-SP", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_NEPTUNE_DCSP: -		m = (typeof(m)){"LPe1002-SP", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe1002-SP", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_BMID:  		m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_BSMB: -		m = (typeof(m)){"LP111", "PCI-X2", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP111", "PCI-X2", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_ZEPHYR:  		m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"}; @@ -2018,16 +2126,20 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LP101: -		m = (typeof(m)){"LP101", "PCI-X", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP101", "PCI-X", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LP10000S: -		m = (typeof(m)){"LP10000-S", "PCI", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP10000-S", "PCI", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LP11000S: -		m = (typeof(m)){"LP11000-S", "PCI-X2", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LP11000-S", "PCI-X2", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LPE11000S: -		m = (typeof(m)){"LPe11000-S", "PCIe", "Fibre Channel Adapter"}; +		m = (typeof(m)){"LPe11000-S", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_SAT:  		m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"}; @@ -2048,20 +2160,21 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_HORNET: -		m = 
(typeof(m)){"LP21000", "PCIe", "FCoE Adapter"}; +		m = (typeof(m)){"LP21000", "PCIe", +				"Obsolete, Unsupported FCoE Adapter"};  		GE = 1;  		break;  	case PCI_DEVICE_ID_PROTEUS_VF:  		m = (typeof(m)){"LPev12000", "PCIe IOV", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PROTEUS_PF:  		m = (typeof(m)){"LPev12000", "PCIe IOV", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_PROTEUS_S:  		m = (typeof(m)){"LPemv12002-S", "PCIe IOV", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_TIGERSHARK:  		oneConnect = 1; @@ -2077,17 +2190,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)  		break;  	case PCI_DEVICE_ID_BALIUS:  		m = (typeof(m)){"LPVe12002", "PCIe Shared I/O", -				"Fibre Channel Adapter"}; +				"Obsolete, Unsupported Fibre Channel Adapter"};  		break;  	case PCI_DEVICE_ID_LANCER_FC: -	case PCI_DEVICE_ID_LANCER_FC_VF:  		m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};  		break; +	case PCI_DEVICE_ID_LANCER_FC_VF: +		m = (typeof(m)){"LPe16000", "PCIe", +				"Obsolete, Unsupported Fibre Channel Adapter"}; +		break;  	case PCI_DEVICE_ID_LANCER_FCOE: -	case PCI_DEVICE_ID_LANCER_FCOE_VF:  		oneConnect = 1;  		m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};  		break; +	case PCI_DEVICE_ID_LANCER_FCOE_VF: +		oneConnect = 1; +		m = (typeof(m)){"OCe15100", "PCIe", +				"Obsolete, Unsupported FCoE"}; +		break;  	case PCI_DEVICE_ID_SKYHAWK:  	case PCI_DEVICE_ID_SKYHAWK_VF:  		oneConnect = 1; @@ -4545,7 +4665,7 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)  	pci_save_state(pdev);  	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */ -	if (pci_find_capability(pdev, PCI_CAP_ID_EXP)) +	if (pci_is_pcie(pdev))  		pdev->needs_freset = 1;  	return 0; @@ -4581,8 +4701,6 @@ lpfc_disable_pci_dev(struct lpfc_hba *phba)  	/* Release PCI resource and disable PCI device */  	pci_release_selected_regions(pdev, bars);  	pci_disable_device(pdev); -	/* Null out PCI private reference to driver */ -	pci_set_drvdata(pdev, NULL);  	return;  } @@ -4604,7 +4722,10 @@ lpfc_reset_hba(struct lpfc_hba *phba)  		phba->link_state = LPFC_HBA_ERROR;  		return;  	} -	lpfc_offline_prep(phba, LPFC_MBX_WAIT); +	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) +		lpfc_offline_prep(phba, LPFC_MBX_WAIT); +	else +		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);  	lpfc_offline(phba);  	lpfc_sli_brdrestart(phba);  	lpfc_online(phba); @@ -4858,6 +4979,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};  	struct lpfc_mqe *mqe;  	int longs; +	int fof_vectors = 0;  	/* Get all the module params for configuring this host */  	lpfc_get_cfgparam(phba); @@ -5063,6 +5185,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	rc = lpfc_sli4_read_config(phba);  	if (unlikely(rc))  		goto out_free_bsmbx; +	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba); +	if (unlikely(rc)) +		goto out_free_bsmbx;  	/* IF Type 0 ports get initialized now. 
*/  	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == @@ -5120,6 +5245,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  		}  	}  	mempool_free(mboxq, phba->mbox_mem_pool); + +	/* Verify OAS is supported */ +	lpfc_sli4_oas_verify(phba); +	if (phba->cfg_fof) +		fof_vectors = 1; +  	/* Verify all the SLI4 queues */  	rc = lpfc_sli4_queue_verify(phba);  	if (rc) @@ -5161,7 +5292,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	phba->sli4_hba.fcp_eq_hdl =  			kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * -			    phba->cfg_fcp_io_channel), GFP_KERNEL); +			    (fof_vectors + phba->cfg_fcp_io_channel)), +			    GFP_KERNEL);  	if (!phba->sli4_hba.fcp_eq_hdl) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"2572 Failed allocate memory for " @@ -5171,7 +5303,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)  	}  	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * -				      phba->cfg_fcp_io_channel), GFP_KERNEL); +				  (fof_vectors + +				   phba->cfg_fcp_io_channel)), GFP_KERNEL);  	if (!phba->sli4_hba.msix_entries) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"2573 Failed allocate memory for msi-x " @@ -5269,6 +5402,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)  	kfree(phba->sli4_hba.cpu_map);  	phba->sli4_hba.num_present_cpu = 0;  	phba->sli4_hba.num_online_cpu = 0; +	phba->sli4_hba.curr_disp_cpu = 0;  	/* Free memory allocated for msi-x interrupt vector entries */  	kfree(phba->sli4_hba.msix_entries); @@ -5392,6 +5526,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)  	/* Initialize FCF connection rec list */  	INIT_LIST_HEAD(&phba->fcf_conn_rec_list); +	/* Initialize OAS configuration list */ +	spin_lock_init(&phba->devicelock); +	INIT_LIST_HEAD(&phba->luns); +  	return 0;  } @@ -6818,6 +6956,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)  	int cfg_fcp_io_channel;  	uint32_t cpu;  	uint32_t i = 0; +	int fof_vectors = phba->cfg_fof ? 
1 : 0;  	/*  	 * Sanity check for configured queue parameters against the run-time @@ -6834,6 +6973,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)  	}  	phba->sli4_hba.num_online_cpu = i;  	phba->sli4_hba.num_present_cpu = lpfc_present_cpu; +	phba->sli4_hba.curr_disp_cpu = 0;  	if (i < cfg_fcp_io_channel) {  		lpfc_printf_log(phba, @@ -6844,7 +6984,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)  		cfg_fcp_io_channel = i;  	} -	if (cfg_fcp_io_channel > +	if (cfg_fcp_io_channel + fof_vectors >  	    phba->sli4_hba.max_cfg_param.max_eq) {  		if (phba->sli4_hba.max_cfg_param.max_eq <  		    LPFC_FCP_IO_CHAN_MIN) { @@ -6861,7 +7001,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)  				"available EQs: from %d to %d\n",  				cfg_fcp_io_channel,  				phba->sli4_hba.max_cfg_param.max_eq); -		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq; +		cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq - +			fof_vectors;  	}  	/* The actual number of FCP event queues adopted */ @@ -7072,6 +7213,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)  	}  	phba->sli4_hba.dat_rq = qdesc; +	/* Create the Queues needed for Flash Optimized Fabric operations */ +	if (phba->cfg_fof) +		lpfc_fof_queue_create(phba);  	return 0;  out_error: @@ -7096,6 +7240,9 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)  {  	int idx; +	if (phba->cfg_fof) +		lpfc_fof_queue_destroy(phba); +  	if (phba->sli4_hba.hba_eq != NULL) {  		/* Release HBA event queue */  		for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) { @@ -7480,8 +7627,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)  			phba->sli4_hba.hdr_rq->queue_id,  			phba->sli4_hba.dat_rq->queue_id,  			phba->sli4_hba.els_cq->queue_id); + +	if (phba->cfg_fof) { +		rc = lpfc_fof_queue_setup(phba); +		if (rc) { +			lpfc_printf_log(phba, KERN_ERR, LOG_INIT, +					"0549 Failed setup of FOF Queues: " +					"rc = 0x%x\n", rc); +			goto out_destroy_els_rq; +		} +	}  	return 0; +out_destroy_els_rq: +	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);  out_destroy_els_wq:  	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);  out_destroy_mbx_wq: @@ -7520,6 +7679,9 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)  {  	int fcp_qidx; +	/* Unset the queues created for Flash Optimized Fabric operations */ +	if (phba->cfg_fof) +		lpfc_fof_queue_destroy(phba);  	/* Unset mailbox command work queue */  	lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);  	/* Unset ELS work queue */ @@ -8637,6 +8799,10 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)  	/* Configure MSI-X capability structure */  	vectors = phba->cfg_fcp_io_channel; +	if (phba->cfg_fof) { +		phba->sli4_hba.msix_entries[index].entry = index; +		vectors++; +	}  enable_msix_vectors:  	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,  			     vectors); @@ -8666,7 +8832,15 @@ enable_msix_vectors:  		phba->sli4_hba.fcp_eq_hdl[index].idx = index;  		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;  		atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1); -		rc = request_irq(phba->sli4_hba.msix_entries[index].vector, +		if (phba->cfg_fof && (index == (vectors - 1))) +			rc = request_irq( +				phba->sli4_hba.msix_entries[index].vector, +				 &lpfc_sli4_fof_intr_handler, IRQF_SHARED, +				 (char *)&phba->sli4_hba.handler_name[index], +				 &phba->sli4_hba.fcp_eq_hdl[index]); +		else +			rc = request_irq( +				phba->sli4_hba.msix_entries[index].vector,  				 &lpfc_sli4_hba_intr_handler, IRQF_SHARED,  				 (char *)&phba->sli4_hba.handler_name[index],  				 &phba->sli4_hba.fcp_eq_hdl[index]); 
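With cfg_fof set, the MSI-X hunks above give the layout: entries 0 through cfg_fcp_io_channel-1 service the FCP io channels, and one extra last entry services the Flash Optimized Fabric event queue. Only that last vector is bound to lpfc_sli4_fof_intr_handler; all others keep the regular HBA handler. A condensed sketch of the dispatch (the wrapper function is illustrative, not the driver's code, and error unwinding is omitted):

#include <linux/interrupt.h>

static int sketch_request_vectors(struct lpfc_hba *phba, int vectors)
{
	int index, rc;

	for (index = 0; index < vectors; index++) {
		/* the last vector doubles as the FOF/OAS event queue */
		irq_handler_t handler =
			(phba->cfg_fof && index == vectors - 1) ?
			lpfc_sli4_fof_intr_handler :
			lpfc_sli4_hba_intr_handler;

		rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
				 handler, IRQF_SHARED,
				 (char *)&phba->sli4_hba.handler_name[index],
				 &phba->sli4_hba.fcp_eq_hdl[index]);
		if (rc)
			return rc;
	}
	return 0;
}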
@@ -8678,6 +8852,9 @@ enable_msix_vectors:  		}  	} +	if (phba->cfg_fof) +		vectors--; +  	if (vectors != phba->cfg_fcp_io_channel) {  		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  				"3238 Reducing IO channels to match number of " @@ -8723,7 +8900,10 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba)  		free_irq(phba->sli4_hba.msix_entries[index].vector,  			 &phba->sli4_hba.fcp_eq_hdl[index]);  	} - +	if (phba->cfg_fof) { +		free_irq(phba->sli4_hba.msix_entries[index].vector, +			 &phba->sli4_hba.fcp_eq_hdl[index]); +	}  	/* Disable MSI-X */  	pci_disable_msix(phba->pcidev); @@ -8773,6 +8953,10 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)  		phba->sli4_hba.fcp_eq_hdl[index].phba = phba;  	} +	if (phba->cfg_fof) { +		phba->sli4_hba.fcp_eq_hdl[index].idx = index; +		phba->sli4_hba.fcp_eq_hdl[index].phba = phba; +	}  	return 0;  } @@ -8855,6 +9039,12 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)  				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].  					fcp_eq_in_use, 1);  			} +			if (phba->cfg_fof) { +				phba->sli4_hba.fcp_eq_hdl[index].idx = index; +				phba->sli4_hba.fcp_eq_hdl[index].phba = phba; +				atomic_set(&phba->sli4_hba.fcp_eq_hdl[index]. +					fcp_eq_in_use, 1); +			}  		}  	}  	return intr_mode; @@ -9165,6 +9355,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)  		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;  	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;  	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters); +	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);  	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);  	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);  	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters); @@ -9429,7 +9620,6 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)  	/* Disable interrupt */  	lpfc_sli_disable_intr(phba); -	pci_set_drvdata(pdev, NULL);  	scsi_host_put(shost);  	/* @@ -9584,9 +9774,6 @@ lpfc_pci_resume_one_s3(struct pci_dev *pdev)  static void  lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)  { -	struct lpfc_sli *psli = &phba->sli; -	struct lpfc_sli_ring  *pring; -  	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  			"2723 PCI channel I/O abort preparing for recovery\n"); @@ -9594,8 +9781,7 @@ lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)  	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq  	 * and let the SCSI mid-layer to retry them to recover.  	 */ -	pring = &psli->ring[psli->fcp_ring]; -	lpfc_sli_abort_iocb_ring(phba, pring); +	lpfc_sli_abort_fcp_rings(phba);  }  /** @@ -10338,17 +10524,13 @@ lpfc_pci_resume_one_s4(struct pci_dev *pdev)  static void  lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)  { -	struct lpfc_sli *psli = &phba->sli; -	struct lpfc_sli_ring  *pring; -  	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,  			"2828 PCI channel I/O abort preparing for recovery\n");  	/*  	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq  	 * and let the SCSI mid-layer to retry them to recover.  	 */ -	pring = &psli->ring[psli->fcp_ring]; -	lpfc_sli_abort_iocb_ring(phba, pring); +	lpfc_sli_abort_fcp_rings(phba);  }  /** @@ -10799,6 +10981,168 @@ lpfc_io_resume(struct pci_dev *pdev)  	return;  } +/** + * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter + * @phba: pointer to lpfc hba data structure. + * + * This routine checks to see if OAS is supported for this adapter. If + * supported, the configure Flash Optimized Fabric flag is set.  
Otherwise, + * the enable oas flag is cleared and the pool created for OAS device data + * is destroyed. + * + **/ +void +lpfc_sli4_oas_verify(struct lpfc_hba *phba) +{ + +	if (!phba->cfg_EnableXLane) +		return; + +	if (phba->sli4_hba.pc_sli4_params.oas_supported) { +		phba->cfg_fof = 1; +	} else { +		phba->cfg_fof = 0; +		if (phba->device_data_mem_pool) +			mempool_destroy(phba->device_data_mem_pool); +		phba->device_data_mem_pool = NULL; +	} + +	return; +} + +/** + * lpfc_fof_queue_setup - Set up all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to set up all the fof queues for the FC HBA + * operation. + * + * Return codes + *      0 - successful + *      -ENOMEM - No available memory + **/ +int +lpfc_fof_queue_setup(struct lpfc_hba *phba) +{ +	struct lpfc_sli *psli = &phba->sli; +	int rc; + +	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX); +	if (rc) +		return -ENOMEM; + +	if (phba->cfg_fof) { + +		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq, +				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP); +		if (rc) +			goto out_oas_cq; + +		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq, +				    phba->sli4_hba.oas_cq, LPFC_FCP); +		if (rc) +			goto out_oas_wq; + +		phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING]; +		phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING]; +	} + +	return 0; + +out_oas_wq: +	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq); +out_oas_cq: +	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq); +	return rc; + +} + +/** + * lpfc_fof_queue_create - Create all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the fof queues for the FC HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as placeholder. + * + * Return codes + *      0 - successful + *      -ENOMEM - No available memory + *      -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_fof_queue_create(struct lpfc_hba *phba) +{ +	struct lpfc_queue *qdesc; + +	/* Create FOF EQ */ +	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, +				      phba->sli4_hba.eq_ecount); +	if (!qdesc) +		goto out_error; + +	phba->sli4_hba.fof_eq = qdesc; + +	if (phba->cfg_fof) { + +		/* Create OAS CQ */ +		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, +						      phba->sli4_hba.cq_ecount); +		if (!qdesc) +			goto out_error; + +		phba->sli4_hba.oas_cq = qdesc; + +		/* Create OAS WQ */ +		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize, +					      phba->sli4_hba.wq_ecount); +		if (!qdesc) +			goto out_error; + +		phba->sli4_hba.oas_wq = qdesc; + +	} +	return 0; + +out_error: +	lpfc_fof_queue_destroy(phba); +	return -ENOMEM; +} + +/** + * lpfc_fof_queue_destroy - Destroy all the fof queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to release all the SLI4 fof queues for the FC HBA + * operation. 
+ * + * Return codes + *      0 - successful + **/ +int +lpfc_fof_queue_destroy(struct lpfc_hba *phba) +{ +	/* Release FOF Event queue */ +	if (phba->sli4_hba.fof_eq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq); +		phba->sli4_hba.fof_eq = NULL; +	} + +	/* Release OAS Completion queue */ +	if (phba->sli4_hba.oas_cq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq); +		phba->sli4_hba.oas_cq = NULL; +	} + +	/* Release OAS Work queue */ +	if (phba->sli4_hba.oas_wq != NULL) { +		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq); +		phba->sli4_hba.oas_wq = NULL; +	} +	return 0; +} +  static struct pci_device_id lpfc_id_table[] = {  	{PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,  		PCI_ANY_ID, PCI_ANY_ID, }, diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 812d0cd7c86..3fa65338d3f 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2012 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -38,10 +38,29 @@  #include "lpfc_scsi.h"  #include "lpfc.h"  #include "lpfc_crtn.h" +#include "lpfc_logmsg.h"  #define LPFC_MBUF_POOL_SIZE     64      /* max elements in MBUF safety pool */  #define LPFC_MEM_POOL_SIZE      64      /* max elem in non-DMA safety pool */ +#define LPFC_DEVICE_DATA_POOL_SIZE 64   /* max elements in device data pool */ +int +lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { +	size_t bytes; +	int max_xri = phba->sli4_hba.max_cfg_param.max_xri; + +	if (max_xri <= 0) +		return -ENOMEM; +	bytes = ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * +		  sizeof(unsigned long); +	phba->cfg_rrq_xri_bitmap_sz = bytes; +	phba->active_rrq_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, +							    bytes); +	if (!phba->active_rrq_pool) +		return -ENOMEM; +	else +		return 0; +}  /**   * lpfc_mem_alloc - create and allocate all PCI and memory pools @@ -146,6 +165,16 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align)  		phba->lpfc_drb_pool = NULL;  	} +	if (phba->cfg_EnableXLane) { +		phba->device_data_mem_pool = mempool_create_kmalloc_pool( +					LPFC_DEVICE_DATA_POOL_SIZE, +					sizeof(struct lpfc_device_data)); +		if (!phba->device_data_mem_pool) +			goto fail_free_hrb_pool; +	} else { +		phba->device_data_mem_pool = NULL; +	} +  	return 0;   fail_free_hrb_pool:  	pci_pool_destroy(phba->lpfc_hrb_pool); @@ -188,6 +217,7 @@ lpfc_mem_free(struct lpfc_hba *phba)  {  	int i;  	struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; +	struct lpfc_device_data *device_data;  	/* Free HBQ pools */  	lpfc_sli_hbqbuf_free_all(phba); @@ -209,6 +239,10 @@ lpfc_mem_free(struct lpfc_hba *phba)  	/* Free NLP memory pool */  	mempool_destroy(phba->nlp_mem_pool);  	phba->nlp_mem_pool = NULL; +	if (phba->sli_rev == LPFC_SLI_REV4 && phba->active_rrq_pool) { +		mempool_destroy(phba->active_rrq_pool); +		phba->active_rrq_pool = NULL; +	}  	/* Free mbox memory pool */  	mempool_destroy(phba->mbox_mem_pool); @@ -227,6 +261,19 @@ lpfc_mem_free(struct lpfc_hba *phba)  	pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);  	
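The rrq pool sizing in lpfc_mem_alloc_active_rrq_pool_s4 above rounds max_xri bits up to whole unsigned longs before creating the kmalloc-backed mempool. A standalone sketch of that arithmetic follows; the max_xri value used in main() is hypothetical, purely for illustration:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * (int)sizeof(long))

/* Bytes needed for a bitmap of max_xri bits, rounded up to whole longs,
 * matching ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) * sizeof(long). */
static size_t xri_bitmap_bytes(int max_xri)
{
	return ((BITS_PER_LONG - 1 + max_xri) / BITS_PER_LONG) *
	       sizeof(unsigned long);
}

int main(void)
{
	/* e.g. 1000 XRIs on a 64-bit build: 16 longs = 128 bytes */
	printf("%zu\n", xri_bitmap_bytes(1000));
	return 0;
}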
phba->lpfc_scsi_dma_buf_pool = NULL; +	/* Free Device Data memory pool */ +	if (phba->device_data_mem_pool) { +		/* Ensure all objects have been returned to the pool */ +		while (!list_empty(&phba->luns)) { +			device_data = list_first_entry(&phba->luns, +						       struct lpfc_device_data, +						       listentry); +			list_del(&device_data->listentry); +			mempool_free(device_data, phba->device_data_mem_pool); +		} +		mempool_destroy(phba->device_data_mem_pool); +	} +	phba->device_data_mem_pool = NULL;  	return;  } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index abc361259d6..c342f6afd74 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -203,8 +203,6 @@ lpfc_check_elscmpl_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,  int  lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)  { -	LIST_HEAD(completions); -	LIST_HEAD(txcmplq_completions);  	LIST_HEAD(abort_list);  	struct lpfc_sli  *psli = &phba->sli;  	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING]; @@ -216,32 +214,27 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)  			 "Data: x%x x%x x%x\n",  			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,  			 ndlp->nlp_rpi); - +	/* Clean up all fabric IOs first.*/  	lpfc_fabric_abort_nport(ndlp); -	/* First check the txq */ +	/* +	 * Lock the ELS ring txcmplq for SLI3/SLI4 and build a local list +	 * of all ELS IOs that need an ABTS.  The IOs need to stay on the +	 * txcmplq so that the abort operation completes them successfully. +	 */  	spin_lock_irq(&phba->hbalock); -	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { -		/* Check to see if iocb matches the nport we are looking for */ -		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { -			/* It matches, so deque and call compl with anp error */ -			list_move_tail(&iocb->list, &completions); -		} -	} - -	/* Next check the txcmplq */ -	list_splice_init(&pring->txcmplq, &txcmplq_completions); -	spin_unlock_irq(&phba->hbalock); - -	list_for_each_entry_safe(iocb, next_iocb, &txcmplq_completions, list) { -		/* Check to see if iocb matches the nport we are looking for */ +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_lock(&pring->ring_lock); +	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { +	/* Add to abort_list on on NDLP match. */  		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))  			list_add_tail(&iocb->dlist, &abort_list);  	} -	spin_lock_irq(&phba->hbalock); -	list_splice(&txcmplq_completions, &pring->txcmplq); +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_unlock(&pring->ring_lock);  	spin_unlock_irq(&phba->hbalock); +	/* Abort the targeted IOs and remove them from the abort list. 
*/  	list_for_each_entry_safe(iocb, next_iocb, &abort_list, dlist) {  			spin_lock_irq(&phba->hbalock);  			list_del_init(&iocb->dlist); @@ -249,9 +242,28 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)  			spin_unlock_irq(&phba->hbalock);  	} +	INIT_LIST_HEAD(&abort_list); + +	/* Now process the txq */ +	spin_lock_irq(&phba->hbalock); +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_lock(&pring->ring_lock); + +	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { +		/* Check to see if iocb matches the nport we are looking for */ +		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp)) { +			list_del_init(&iocb->list); +			list_add_tail(&iocb->list, &abort_list); +		} +	} + +	if (phba->sli_rev == LPFC_SLI_REV4) +		spin_unlock(&pring->ring_lock); +	spin_unlock_irq(&phba->hbalock); +  	/* Cancel all the IOCBs from the completions list */ -	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, -			      IOERR_SLI_ABORTED); +	lpfc_sli_cancel_iocbs(phba, &abort_list, +			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);  	lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);  	return 0; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index c913e8cc3b2..2df11daad85 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -68,6 +68,17 @@ struct scsi_dif_tuple {  	__be32 ref_tag;         /* Target LBA or indirect LBA */  }; +static struct lpfc_rport_data * +lpfc_rport_data_from_scsi_device(struct scsi_device *sdev) +{ +	struct lpfc_vport *vport = (struct lpfc_vport *)sdev->host->hostdata; + +	if (vport->phba->cfg_fof) +		return ((struct lpfc_device_data *)sdev->hostdata)->rport_data; +	else +		return (struct lpfc_rport_data *)sdev->hostdata; +} +  static void  lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);  static void @@ -304,9 +315,27 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)  	unsigned long new_queue_depth, old_queue_depth;  	old_queue_depth = sdev->queue_depth; -	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + +	switch (reason) { +	case SCSI_QDEPTH_DEFAULT: +		/* change request from sysfs, fall through */ +	case SCSI_QDEPTH_RAMP_UP: +		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); +		break; +	case SCSI_QDEPTH_QFULL: +		if (scsi_track_queue_full(sdev, qdepth) == 0) +			return sdev->queue_depth; + +		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, +				 "0711 detected queue full - lun queue " +				 "depth adjusted to %d.\n", sdev->queue_depth); +		break; +	default: +		return -EOPNOTSUPP; +	} +  	new_queue_depth = sdev->queue_depth; -	rdata = sdev->hostdata; +	rdata = lpfc_rport_data_from_scsi_device(sdev);  	if (rdata)  		lpfc_send_sdev_queuedepth_change_event(phba, vport,  						       rdata->pnode, sdev->lun, @@ -377,50 +406,6 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)  }  /** - * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread 
- * @phba: The Hba for which this call is being executed. - * - * This routine post WORKER_RAMP_UP_QUEUE event for @phba vport. This routine - * post at most 1 event every 5 minute after last_ramp_up_time or - * last_rsrc_error_time.  This routine wakes up worker thread of @phba - * to process WORKER_RAM_DOWN_EVENT event. - * - * This routine should be called with no lock held. - **/ -static inline void -lpfc_rampup_queue_depth(struct lpfc_vport  *vport, -			uint32_t queue_depth) -{ -	unsigned long flags; -	struct lpfc_hba *phba = vport->phba; -	uint32_t evt_posted; -	atomic_inc(&phba->num_cmd_success); - -	if (vport->cfg_lun_queue_depth <= queue_depth) -		return; -	spin_lock_irqsave(&phba->hbalock, flags); -	if (time_before(jiffies, -			phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) || -	    time_before(jiffies, -			phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL)) { -		spin_unlock_irqrestore(&phba->hbalock, flags); -		return; -	} -	phba->last_ramp_up_time = jiffies; -	spin_unlock_irqrestore(&phba->hbalock, flags); - -	spin_lock_irqsave(&phba->pport->work_port_lock, flags); -	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE; -	if (!evt_posted) -		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE; -	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags); - -	if (!evt_posted) -		lpfc_worker_wake_up(phba); -	return; -} - -/**   * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler   * @phba: The Hba for which this call is being executed.   * @@ -472,41 +457,6 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)  }  /** - * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler - * @phba: The Hba for which this call is being executed. - * - * This routine is called to  process WORKER_RAMP_UP_QUEUE event for worker - * thread.This routine increases queue depth for all scsi device on each vport - * associated with @phba by 1. This routine also sets @phba num_rsrc_err and - * num_cmd_success to zero. - **/ -void -lpfc_ramp_up_queue_handler(struct lpfc_hba *phba) -{ -	struct lpfc_vport **vports; -	struct Scsi_Host  *shost; -	struct scsi_device *sdev; -	int i; - -	vports = lpfc_create_vport_work_array(phba); -	if (vports != NULL) -		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { -			shost = lpfc_shost_from_vport(vports[i]); -			shost_for_each_device(sdev, shost) { -				if (vports[i]->cfg_lun_queue_depth <= -				    sdev->queue_depth) -					continue; -				lpfc_change_queue_depth(sdev, -							sdev->queue_depth+1, -							SCSI_QDEPTH_RAMP_UP); -			} -		} -	lpfc_destroy_vport_work_array(phba, vports); -	atomic_set(&phba->num_rsrc_err, 0); -	atomic_set(&phba->num_cmd_success, 0); -} - -/**   * lpfc_scsi_dev_block - set all scsi hosts to block state   * @phba: Pointer to HBA context object.   * @@ -1012,20 +962,25 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)  			break;  		} -		/* Allocate iotag for psb->cur_iocbq. */ -		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); -		if (iotag == 0) { + +		lxri = lpfc_sli4_next_xritag(phba); +		if (lxri == NO_XRI) {  			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, -				psb->data, psb->dma_handle); +			      psb->data, psb->dma_handle);  			kfree(psb);  			break;  		} -		lxri = lpfc_sli4_next_xritag(phba); -		if (lxri == NO_XRI) { +		/* Allocate iotag for psb->cur_iocbq. 
*/ +		iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); +		if (iotag == 0) {  			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, -			      psb->data, psb->dma_handle); +				psb->data, psb->dma_handle);  			kfree(psb); +			lpfc_printf_log(phba, KERN_ERR, LOG_FCP, +					"3368 Failed to allocated IOTAG for" +					" XRI:0x%x\n", lxri); +			lpfc_sli4_free_xri(phba, lxri);  			break;  		}  		psb->cur_iocbq.sli4_lxritag = lxri; @@ -1497,7 +1452,7 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,  	}  	/* Next check if we need to match the remote NPortID or WWPN */ -	rdata = sc->device->hostdata; +	rdata = lpfc_rport_data_from_scsi_device(sc->device);  	if (rdata && rdata->pnode) {  		ndlp = rdata->pnode; @@ -3502,6 +3457,14 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)  	 * we need to set word 4 of IOCB here  	 */  	iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd); + +	/* +	 * If the OAS driver feature is enabled and the lun is enabled for +	 * OAS, set the oas iocb related flags. +	 */ +	if ((phba->cfg_fof) && ((struct lpfc_device_data *) +		scsi_cmnd->device->hostdata)->oas_enabled) +		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;  	return 0;  } @@ -4016,7 +3979,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,  	struct lpfc_nodelist *pnode = rdata->pnode;  	struct scsi_cmnd *cmd;  	int result; -	struct scsi_device *tmp_sdev;  	int depth;  	unsigned long flags;  	struct lpfc_fast_path_event *fast_path_evt; @@ -4261,32 +4223,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,  		return;  	} -	if (!result) -		lpfc_rampup_queue_depth(vport, queue_depth); - -	/* -	 * Check for queue full.  If the lun is reporting queue full, then -	 * back off the lun queue depth to prevent target overloads. -	 */ -	if (result == SAM_STAT_TASK_SET_FULL && pnode && -	    NLP_CHK_NODE_ACT(pnode)) { -		shost_for_each_device(tmp_sdev, shost) { -			if (tmp_sdev->id != scsi_id) -				continue; -			depth = scsi_track_queue_full(tmp_sdev, -						      tmp_sdev->queue_depth-1); -			if (depth <= 0) -				continue; -			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, -					 "0711 detected queue full - lun queue " -					 "depth adjusted to %d.\n", depth); -			lpfc_send_sdev_queuedepth_change_event(phba, vport, -							       pnode, -							       tmp_sdev->lun, -							       depth+1, depth); -		} -	} -  	spin_lock_irqsave(&phba->hbalock, flags);  	lpfc_cmd->pCmd = NULL;  	spin_unlock_irqrestore(&phba->hbalock, flags); @@ -4378,6 +4314,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,  		fcp_cmnd->fcpCntl1 = SIMPLE_Q;  	sli4 = (phba->sli_rev == LPFC_SLI_REV4); +	piocbq->iocb.un.fcpi.fcpi_XRdy = 0;  	/*  	 * There are three possibilities here - use scatter-gather segment, use @@ -4485,10 +4422,10 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,  		piocb->ulpContext =  		  vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];  	} -	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) { -		piocb->ulpFCP2Rcvy = 1; -	} +	piocb->ulpFCP2Rcvy = (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) ? 
1 : 0;  	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f); +	piocb->ulpPU = 0; +	piocb->un.fcpi.fcpi_parm = 0;  	/* ulpTimeout is only one byte */  	if (lpfc_cmd->timeout > 0xff) { @@ -4688,12 +4625,13 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)  {  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;  	struct lpfc_hba   *phba = vport->phba; -	struct lpfc_rport_data *rdata = cmnd->device->hostdata; +	struct lpfc_rport_data *rdata;  	struct lpfc_nodelist *ndlp;  	struct lpfc_scsi_buf *lpfc_cmd;  	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));  	int err; +	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);  	err = fc_remote_port_chkready(rport);  	if (err) {  		cmnd->result = err; @@ -4779,6 +4717,24 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)  				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);  	if (err) {  		atomic_dec(&ndlp->cmd_pending); +		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, +				 "3376 FCP could not issue IOCB err %x" +				 "FCP cmd x%x <%d/%d> " +				 "sid: x%x did: x%x oxid: x%x " +				 "Data: x%x x%x x%x x%x\n", +				 err, cmnd->cmnd[0], +				 cmnd->device ? cmnd->device->id : 0xffff, +				 cmnd->device ? cmnd->device->lun : 0xffff, +				 vport->fc_myDID, ndlp->nlp_DID, +				 phba->sli_rev == LPFC_SLI_REV4 ? +				 lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff, +				 lpfc_cmd->cur_iocbq.iocb.ulpContext, +				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag, +				 lpfc_cmd->cur_iocbq.iocb.ulpTimeout, +				 (uint32_t) +				 (cmnd->request->timeout / 1000)); + +  		goto out_host_busy_free_buf;  	}  	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { @@ -4827,7 +4783,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)  	struct lpfc_scsi_buf *lpfc_cmd;  	IOCB_t *cmd, *icmd;  	int ret = SUCCESS, status = 0; -	unsigned long flags; +	struct lpfc_sli_ring *pring_s4; +	int ring_number, ret_val; +	unsigned long flags, iflags;  	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);  	status = fc_block_scsi_eh(cmnd); @@ -4878,6 +4836,14 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)  	BUG_ON(iocb->context1 != lpfc_cmd); +	/* abort issued in recovery is still in progress */ +	if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { +		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, +			 "3389 SCSI Layer I/O Abort Request is pending\n"); +		spin_unlock_irqrestore(&phba->hbalock, flags); +		goto wait_for_cmpl; +	} +  	abtsiocb = __lpfc_sli_get_iocbq(phba);  	if (abtsiocb == NULL) {  		ret = FAILED; @@ -4916,11 +4882,23 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)  	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;  	abtsiocb->vport = vport; +	if (phba->sli_rev == LPFC_SLI_REV4) { +		ring_number = MAX_SLI3_CONFIGURED_RINGS + iocb->fcp_wqidx; +		pring_s4 = &phba->sli.ring[ring_number]; +		/* Note: both hbalock and ring_lock must be set here */ +		spin_lock_irqsave(&pring_s4->ring_lock, iflags); +		ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, +						abtsiocb, 0); +		spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); +	} else { +		ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, +						abtsiocb, 0); +	}  	/* no longer need the lock after this point */  	spin_unlock_irqrestore(&phba->hbalock, flags); -	if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) == -	    IOCB_ERROR) { + +	if (ret_val == IOCB_ERROR) {  		lpfc_sli_release_iocbq(phba, abtsiocb);  		ret = FAILED;  		goto out; @@ -4930,12 +4908,16 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)  		lpfc_sli_handle_fast_ring_event(phba,  			&phba->sli.ring[LPFC_FCP_RING], HA_R0RE_REQ); 
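After issuing the ABTS, the abort handler below parks on a wait queue until the completion path clears lpfc_cmd->pCmd or 2 * devloss_tmo expires. A userspace analogue of that wait_event_timeout() pattern, with a condition variable standing in for the kernel waitqueue — all names here are illustrative, not the driver's:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct cmd_slot {
	pthread_mutex_t lock;
	pthread_cond_t  done;	/* signalled by the completion path */
	void           *pcmd;	/* non-NULL while the command is owned */
};

/* Wait until *slot no longer references cmd, or until timeout_ms elapses;
 * mirrors wait_event_timeout(waitq, lpfc_cmd->pCmd != cmnd, ...). */
static bool wait_for_abort(struct cmd_slot *slot, void *cmd, long timeout_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec  += timeout_ms / 1000;
	ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&slot->lock);
	while (slot->pcmd == cmd) {
		if (pthread_cond_timedwait(&slot->done, &slot->lock, &ts) != 0)
			break;	/* timed out: caller reports FAILED */
	}
	bool released = (slot->pcmd != cmd);
	pthread_mutex_unlock(&slot->lock);
	return released;
}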
+wait_for_cmpl:  	lpfc_cmd->waitq = &waitq;  	/* Wait for abort to complete */  	wait_event_timeout(waitq,  			  (lpfc_cmd->pCmd != cmnd),  			   msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); + +	spin_lock_irqsave(shost->host_lock, flags);  	lpfc_cmd->waitq = NULL; +	spin_unlock_irqrestore(shost->host_lock, flags);  	if (lpfc_cmd->pCmd == cmnd) {  		ret = FAILED; @@ -4981,6 +4963,73 @@ lpfc_taskmgmt_name(uint8_t task_mgmt_cmd)  	}  } + +/** + * lpfc_check_fcp_rsp - check the returned fcp_rsp to see if task failed + * @vport: The virtual port for which this call is being executed. + * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure. + * + * This routine checks the FCP RSP INFO to see if the tsk mgmt command succeded + * + * Return code : + *   0x2003 - Error + *   0x2002 - Success + **/ +static int +lpfc_check_fcp_rsp(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd) +{ +	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp; +	uint32_t rsp_info; +	uint32_t rsp_len; +	uint8_t  rsp_info_code; +	int ret = FAILED; + + +	if (fcprsp == NULL) +		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, +				 "0703 fcp_rsp is missing\n"); +	else { +		rsp_info = fcprsp->rspStatus2; +		rsp_len = be32_to_cpu(fcprsp->rspRspLen); +		rsp_info_code = fcprsp->rspInfo3; + + +		lpfc_printf_vlog(vport, KERN_INFO, +				 LOG_FCP, +				 "0706 fcp_rsp valid 0x%x," +				 " rsp len=%d code 0x%x\n", +				 rsp_info, +				 rsp_len, rsp_info_code); + +		if ((fcprsp->rspStatus2&RSP_LEN_VALID) && (rsp_len == 8)) { +			switch (rsp_info_code) { +			case RSP_NO_FAILURE: +				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, +						 "0715 Task Mgmt No Failure\n"); +				ret = SUCCESS; +				break; +			case RSP_TM_NOT_SUPPORTED: /* TM rejected */ +				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, +						 "0716 Task Mgmt Target " +						"reject\n"); +				break; +			case RSP_TM_NOT_COMPLETED: /* TM failed */ +				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, +						 "0717 Task Mgmt Target " +						"failed TM\n"); +				break; +			case RSP_TM_INVALID_LU: /* TM to invalid LU! */ +				lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, +						 "0718 Task Mgmt to invalid " +						"LUN\n"); +				break; +			} +		} +	} +	return ret; +} + +  /**   * lpfc_send_taskmgmt - Generic SCSI Task Mgmt Handler   * @vport: The virtual port for which this call is being executed. @@ -5042,12 +5091,8 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,  	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,  					  iocbq, iocbqrsp, lpfc_cmd->timeout); -	if (status != IOCB_SUCCESS) { -		if (status == IOCB_TIMEDOUT) { -			ret = TIMEOUT_ERROR; -		} else -			ret = FAILED; -		lpfc_cmd->status = IOSTAT_DRIVER_REJECT; +	if ((status != IOCB_SUCCESS) || +	    (iocbqrsp->iocb.ulpStatus != IOSTAT_SUCCESS)) {  		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,  			 "0727 TMF %s to TGT %d LUN %d failed (%d, %d) "  			 "iocb_flag x%x\n", @@ -5055,9 +5100,21 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,  			 tgt_id, lun_id, iocbqrsp->iocb.ulpStatus,  			 iocbqrsp->iocb.un.ulpWord[4],  			 iocbq->iocb_flag); -	} else if (status == IOCB_BUSY) -		ret = FAILED; -	else +		/* if ulpStatus != IOCB_SUCCESS, then status == IOCB_SUCCESS */ +		if (status == IOCB_SUCCESS) { +			if (iocbqrsp->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR) +				/* Something in the FCP_RSP was invalid. 
+				 * Check conditions */ +				ret = lpfc_check_fcp_rsp(vport, lpfc_cmd); +			else +				ret = FAILED; +		} else if (status == IOCB_TIMEDOUT) { +			ret = TIMEOUT_ERROR; +		} else { +			ret = FAILED; +		} +		lpfc_cmd->status = IOSTAT_DRIVER_REJECT; +	} else  		ret = SUCCESS;  	lpfc_sli_release_iocbq(phba, iocbqrsp); @@ -5083,10 +5140,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,  static int  lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)  { -	struct lpfc_rport_data *rdata = cmnd->device->hostdata; +	struct lpfc_rport_data *rdata;  	struct lpfc_nodelist *pnode;  	unsigned long later; +	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);  	if (!rdata) {  		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,  			"0797 Tgt Map rport failure: rdata x%p\n", rdata); @@ -5104,7 +5162,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd)  		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)  			return SUCCESS;  		schedule_timeout_uninterruptible(msecs_to_jiffies(500)); -		rdata = cmnd->device->hostdata; +		rdata = lpfc_rport_data_from_scsi_device(cmnd->device);  		if (!rdata)  			return FAILED;  		pnode = rdata->pnode; @@ -5141,8 +5199,9 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,  	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);  	if (cnt) -		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], -				    tgt_id, lun_id, context); +		lpfc_sli_abort_taskmgmt(vport, +					&phba->sli.ring[phba->sli.fcp_ring], +					tgt_id, lun_id, context);  	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;  	while (time_after(later, jiffies) && cnt) {  		schedule_timeout_uninterruptible(msecs_to_jiffies(20)); @@ -5176,13 +5235,14 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)  {  	struct Scsi_Host  *shost = cmnd->device->host;  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; -	struct lpfc_rport_data *rdata = cmnd->device->hostdata; +	struct lpfc_rport_data *rdata;  	struct lpfc_nodelist *pnode;  	unsigned tgt_id = cmnd->device->id;  	unsigned int lun_id = cmnd->device->lun;  	struct lpfc_scsi_event_header scsi_event; -	int status, ret = SUCCESS; +	int status; +	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);  	if (!rdata) {  		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,  			"0798 Device Reset rport failure: rdata x%p\n", rdata); @@ -5222,9 +5282,11 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)  	 * So, continue on.  	 * We will report success if all the i/o aborts successfully.  	 */ -	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, +	if (status == SUCCESS) +		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,  						LPFC_CTX_LUN); -	return ret; + +	return status;  }  /** @@ -5243,13 +5305,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)  {  	struct Scsi_Host  *shost = cmnd->device->host;  	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; -	struct lpfc_rport_data *rdata = cmnd->device->hostdata; +	struct lpfc_rport_data *rdata;  	struct lpfc_nodelist *pnode;  	unsigned tgt_id = cmnd->device->id;  	unsigned int lun_id = cmnd->device->lun;  	struct lpfc_scsi_event_header scsi_event; -	int status, ret = SUCCESS; +	int status; +	rdata = lpfc_rport_data_from_scsi_device(cmnd->device);  	if (!rdata) {  		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,  			"0799 Target Reset rport failure: rdata x%p\n", rdata); @@ -5289,9 +5352,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)  	 * So, continue on.  	 
* We will report success if all the i/o aborts successfully.  	 */ -	ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id, +	if (status == SUCCESS) +		status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,  					  LPFC_CTX_TGT); -	return ret; +	return status;  }  /** @@ -5448,11 +5512,45 @@ lpfc_slave_alloc(struct scsi_device *sdev)  	uint32_t num_to_alloc = 0;  	int num_allocated = 0;  	uint32_t sdev_cnt; +	struct lpfc_device_data *device_data; +	unsigned long flags; +	struct lpfc_name target_wwpn;  	if (!rport || fc_remote_port_chkready(rport))  		return -ENXIO; -	sdev->hostdata = rport->dd_data; +	if (phba->cfg_fof) { + +		/* +		 * Check to see if the device data structure for the lun +		 * exists.  If not, create one. +		 */ + +		u64_to_wwn(rport->port_name, target_wwpn.u.wwn); +		spin_lock_irqsave(&phba->devicelock, flags); +		device_data = __lpfc_get_device_data(phba, +						     &phba->luns, +						     &vport->fc_portname, +						     &target_wwpn, +						     sdev->lun); +		if (!device_data) { +			spin_unlock_irqrestore(&phba->devicelock, flags); +			device_data = lpfc_create_device_data(phba, +							&vport->fc_portname, +							&target_wwpn, +							sdev->lun, true); +			if (!device_data) +				return -ENOMEM; +			spin_lock_irqsave(&phba->devicelock, flags); +			list_add_tail(&device_data->listentry, &phba->luns); +		} +		device_data->rport_data = rport->dd_data; +		device_data->available = true; +		spin_unlock_irqrestore(&phba->devicelock, flags); +		sdev->hostdata = device_data; +	} else { +		sdev->hostdata = rport->dd_data; +	}  	sdev_cnt = atomic_inc_return(&phba->sdev_cnt);  	/* @@ -5542,11 +5640,344 @@ lpfc_slave_destroy(struct scsi_device *sdev)  {  	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;  	struct lpfc_hba   *phba = vport->phba; +	unsigned long flags; +	struct lpfc_device_data *device_data = sdev->hostdata; +  	atomic_dec(&phba->sdev_cnt); +	if ((phba->cfg_fof) && (device_data)) { +		spin_lock_irqsave(&phba->devicelock, flags); +		device_data->available = false; +		if (!device_data->oas_enabled) +			lpfc_delete_device_data(phba, device_data); +		spin_unlock_irqrestore(&phba->devicelock, flags); +	}  	sdev->hostdata = NULL;  	return;  } +/** + * lpfc_create_device_data - creates and initializes device data structure for OAS + * @pha: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * @atomic_create: Flag to indicate if memory should be allocated using the + *		  GFP_ATOMIC flag or not. + * + * This routine creates a device data structure which will contain identifying + * information for the device (host wwpn, target wwpn, lun), state of OAS, + * whether or not the corresponding lun is available by the system, + * and pointer to the rport data. 
+ * + * Return codes: + *   NULL - Error + *   Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, +			struct lpfc_name *target_wwpn, uint64_t lun, +			bool atomic_create) +{ + +	struct lpfc_device_data *lun_info; +	int memory_flags; + +	if (unlikely(!phba) || !vport_wwpn || !target_wwpn  || +	    !(phba->cfg_fof)) +		return NULL; + +	/* Attempt to create the device data to contain lun info */ + +	if (atomic_create) +		memory_flags = GFP_ATOMIC; +	else +		memory_flags = GFP_KERNEL; +	lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags); +	if (!lun_info) +		return NULL; +	INIT_LIST_HEAD(&lun_info->listentry); +	lun_info->rport_data  = NULL; +	memcpy(&lun_info->device_id.vport_wwpn, vport_wwpn, +	       sizeof(struct lpfc_name)); +	memcpy(&lun_info->device_id.target_wwpn, target_wwpn, +	       sizeof(struct lpfc_name)); +	lun_info->device_id.lun = lun; +	lun_info->oas_enabled = false; +	lun_info->available = false; +	return lun_info; +} + +/** + * lpfc_delete_device_data - frees a device data structure for OAS + * @pha: Pointer to host bus adapter structure. + * @lun_info: Pointer to device data structure to free. + * + * This routine frees the previously allocated device data structure passed. + * + **/ +void +lpfc_delete_device_data(struct lpfc_hba *phba, +			struct lpfc_device_data *lun_info) +{ + +	if (unlikely(!phba) || !lun_info  || +	    !(phba->cfg_fof)) +		return; + +	if (!list_empty(&lun_info->listentry)) +		list_del(&lun_info->listentry); +	mempool_free(lun_info, phba->device_data_mem_pool); +	return; +} + +/** + * __lpfc_get_device_data - returns the device data for the specified lun + * @pha: Pointer to host bus adapter structure. + * @list: Point to list to search. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun on target + * + * This routine searches the list passed for the specified lun's device data. + * This function does not hold locks, it is the responsibility of the caller + * to ensure the proper lock is held before calling the function. + * + * Return codes: + *   NULL - Error + *   Pointer to lpfc_device_data - Success + **/ +struct lpfc_device_data* +__lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list, +		       struct lpfc_name *vport_wwpn, +		       struct lpfc_name *target_wwpn, uint64_t lun) +{ + +	struct lpfc_device_data *lun_info; + +	if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn || +	    !phba->cfg_fof) +		return NULL; + +	/* Check to see if the lun is already enabled for OAS. */ + +	list_for_each_entry(lun_info, list, listentry) { +		if ((memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, +			    sizeof(struct lpfc_name)) == 0) && +		    (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, +			    sizeof(struct lpfc_name)) == 0) && +		    (lun_info->device_id.lun == lun)) +			return lun_info; +	} + +	return NULL; +} + +/** + * lpfc_find_next_oas_lun - searches for the next oas lun + * @pha: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @starting_lun: Pointer to the lun to start searching for + * @found_vport_wwpn: Pointer to the found lun's vport wwpn information + * @found_target_wwpn: Pointer to the found lun's target wwpn information + * @found_lun: Pointer to the found lun. + * @found_lun_status: Pointer to status of the found lun. 
+ * + * This routine searches the luns list for the specified lun + * or the first lun for the vport/target.  If the vport wwpn contains + * a zero value then a specific vport is not specified. In this case + * any vport which contains the lun will be considered a match.  If the + * target wwpn contains a zero value then a specific target is not specified. + * In this case any target which contains the lun will be considered a + * match.  If the lun is found, the lun, vport wwpn, target wwpn and lun status + * are returned.  The function will also return the next lun if available. + * If the next lun is not found, starting_lun parameter will be set to + * NO_MORE_OAS_LUN. + * + * Return codes: + *   non-0 - Error + *   0 - Success + **/ +bool +lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, +		       struct lpfc_name *target_wwpn, uint64_t *starting_lun, +		       struct lpfc_name *found_vport_wwpn, +		       struct lpfc_name *found_target_wwpn, +		       uint64_t *found_lun, +		       uint32_t *found_lun_status) +{ + +	unsigned long flags; +	struct lpfc_device_data *lun_info; +	struct lpfc_device_id *device_id; +	uint64_t lun; +	bool found = false; + +	if (unlikely(!phba) || !vport_wwpn || !target_wwpn || +	    !starting_lun || !found_vport_wwpn || +	    !found_target_wwpn || !found_lun || !found_lun_status || +	    (*starting_lun == NO_MORE_OAS_LUN) || +	    !phba->cfg_fof) +		return false; + +	lun = *starting_lun; +	*found_lun = NO_MORE_OAS_LUN; +	*starting_lun = NO_MORE_OAS_LUN; + +	/* Search for lun or the lun closet in value */ + +	spin_lock_irqsave(&phba->devicelock, flags); +	list_for_each_entry(lun_info, &phba->luns, listentry) { +		if (((wwn_to_u64(vport_wwpn->u.wwn) == 0) || +		     (memcmp(&lun_info->device_id.vport_wwpn, vport_wwpn, +			    sizeof(struct lpfc_name)) == 0)) && +		    ((wwn_to_u64(target_wwpn->u.wwn) == 0) || +		     (memcmp(&lun_info->device_id.target_wwpn, target_wwpn, +			    sizeof(struct lpfc_name)) == 0)) && +		    (lun_info->oas_enabled)) { +			device_id = &lun_info->device_id; +			if ((!found) && +			    ((lun == FIND_FIRST_OAS_LUN) || +			     (device_id->lun == lun))) { +				*found_lun = device_id->lun; +				memcpy(found_vport_wwpn, +				       &device_id->vport_wwpn, +				       sizeof(struct lpfc_name)); +				memcpy(found_target_wwpn, +				       &device_id->target_wwpn, +				       sizeof(struct lpfc_name)); +				if (lun_info->available) +					*found_lun_status = +						OAS_LUN_STATUS_EXISTS; +				else +					*found_lun_status = 0; +				if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT) +					memset(vport_wwpn, 0x0, +					       sizeof(struct lpfc_name)); +				if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET) +					memset(target_wwpn, 0x0, +					       sizeof(struct lpfc_name)); +				found = true; +			} else if (found) { +				*starting_lun = device_id->lun; +				memcpy(vport_wwpn, &device_id->vport_wwpn, +				       sizeof(struct lpfc_name)); +				memcpy(target_wwpn, &device_id->target_wwpn, +				       sizeof(struct lpfc_name)); +				break; +			} +		} +	} +	spin_unlock_irqrestore(&phba->devicelock, flags); +	return found; +} + +/** + * lpfc_enable_oas_lun - enables a lun for OAS operations + * @pha: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * + * This routine enables a lun for oas operations.  
The routines does so by + * doing the following : + * + *   1) Checks to see if the device data for the lun has been created. + *   2) If found, sets the OAS enabled flag if not set and returns. + *   3) Otherwise, creates a device data structure. + *   4) If successfully created, indicates the device data is for an OAS lun, + *   indicates the lun is not available and add to the list of luns. + * + * Return codes: + *   false - Error + *   true - Success + **/ +bool +lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, +		    struct lpfc_name *target_wwpn, uint64_t lun) +{ + +	struct lpfc_device_data *lun_info; +	unsigned long flags; + +	if (unlikely(!phba) || !vport_wwpn || !target_wwpn || +	    !phba->cfg_fof) +		return false; + +	spin_lock_irqsave(&phba->devicelock, flags); + +	/* Check to see if the device data for the lun has been created */ +	lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn, +					  target_wwpn, lun); +	if (lun_info) { +		if (!lun_info->oas_enabled) +			lun_info->oas_enabled = true; +		spin_unlock_irqrestore(&phba->devicelock, flags); +		return true; +	} + +	/* Create an lun info structure and add to list of luns */ +	lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun, +					   false); +	if (lun_info) { +		lun_info->oas_enabled = true; +		lun_info->available = false; +		list_add_tail(&lun_info->listentry, &phba->luns); +		spin_unlock_irqrestore(&phba->devicelock, flags); +		return true; +	} +	spin_unlock_irqrestore(&phba->devicelock, flags); +	return false; +} + +/** + * lpfc_disable_oas_lun - disables a lun for OAS operations + * @pha: Pointer to host bus adapter structure. + * @vport_wwpn: Pointer to vport's wwpn information + * @target_wwpn: Pointer to target's wwpn information + * @lun: Lun + * + * This routine disables a lun for oas operations.  The routines does so by + * doing the following : + * + *   1) Checks to see if the device data for the lun is created. + *   2) If present, clears the flag indicating this lun is for OAS. + *   3) If the lun is not available by the system, the device data is + *   freed. + * + * Return codes: + *   false - Error + *   true - Success + **/ +bool +lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn, +		     struct lpfc_name *target_wwpn, uint64_t lun) +{ + +	struct lpfc_device_data *lun_info; +	unsigned long flags; + +	if (unlikely(!phba) || !vport_wwpn || !target_wwpn || +	    !phba->cfg_fof) +		return false; + +	spin_lock_irqsave(&phba->devicelock, flags); + +	/* Check to see if the lun is available. */ +	lun_info = __lpfc_get_device_data(phba, +					  &phba->luns, vport_wwpn, +					  target_wwpn, lun); +	if (lun_info) { +		lun_info->oas_enabled = false; +		if (!lun_info->available) +			lpfc_delete_device_data(phba, lun_info); +		spin_unlock_irqrestore(&phba->devicelock, flags); +		return true; +	} + +	spin_unlock_irqrestore(&phba->devicelock, flags); +	return false; +}  struct scsi_host_template lpfc_template = {  	.module			= THIS_MODULE, diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index b1d9f7fcb91..0389ac1e7b8 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           
* + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   *                                                                 * @@ -41,6 +41,20 @@ struct lpfc_rport_data {  	struct lpfc_nodelist *pnode;	/* Pointer to the node structure. */  }; +struct lpfc_device_id { +	struct lpfc_name vport_wwpn; +	struct lpfc_name target_wwpn; +	uint64_t lun; +}; + +struct lpfc_device_data { +	struct list_head listentry; +	struct lpfc_rport_data *rport_data; +	struct lpfc_device_id device_id; +	bool oas_enabled; +	bool available; +}; +  struct fcp_rsp {  	uint32_t rspRsvd1;	/* FC Word 0, byte 0:3 */  	uint32_t rspRsvd2;	/* FC Word 1, byte 0:3 */ @@ -73,6 +87,7 @@ struct fcp_rsp {  #define RSP_RO_MISMATCH_ERR  0x03  #define RSP_TM_NOT_SUPPORTED 0x04	/* Task mgmt function not supported */  #define RSP_TM_NOT_COMPLETED 0x05	/* Task mgmt function not performed */ +#define RSP_TM_INVALID_LU    0x09	/* Task mgmt function to invalid LU */  	uint32_t rspInfoRsvd;	/* FCP_RSP_INFO bytes 4-7 (reserved) */ @@ -165,3 +180,7 @@ struct lpfc_scsi_buf {  #define LPFC_SCSI_DMA_EXT_SIZE 264  #define LPFC_BPL_SIZE          1024  #define MDAC_DIRECT_CMD                  0x22 + +#define FIND_FIRST_OAS_LUN		 0 +#define NO_MORE_OAS_LUN			-1 +#define NOT_OAS_ENABLED_LUN		NO_MORE_OAS_LUN diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 612f48973ff..32ada050557 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   * Portions Copyright (C) 2004-2005 Christoph Hellwig              * @@ -71,6 +71,8 @@ static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,  				       int);  static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,  			uint32_t); +static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); +static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);  static IOCB_t *  lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -263,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)  		return NULL;  	q->hba_index = idx; + +	/* +	 * insert barrier for instruction interlock : data from the hardware +	 * must have the valid bit checked before it can be copied and acted +	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative +	 * instructions allowing action on content before valid bit checked, +	 * add barrier here as well. May not be needed as "content" is a +	 * single 32-bit entity here (vs multi word structure for cq's). +	 */ +	mb();  	return eqe;  } @@ -368,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)  	cqe = q->qe[q->hba_index].cqe;  	q->hba_index = idx; + +	/* +	 * insert barrier for instruction interlock : data from the hardware +	 * must have the valid bit checked before it can be copied and acted +	 * upon. 
Speculative instructions were allowing a bcopy at the start +	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately +	 * after our return, to copy data before the valid bit check above +	 * was done. As such, some of the copied data was stale. The barrier +	 * ensures the check is before any data is copied. +	 */ +	mb();  	return cqe;  } @@ -633,7 +656,7 @@ lpfc_clr_rrq_active(struct lpfc_hba *phba,  	if (!ndlp)  		goto out; -	if (test_and_clear_bit(xritag, ndlp->active_rrqs.xri_bitmap)) { +	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {  		rrq->send_rrq = 0;  		rrq->xritag = 0;  		rrq->rrq_stop_time = 0; @@ -676,7 +699,8 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba)  			next_time = rrq->rrq_stop_time;  	}  	spin_unlock_irqrestore(&phba->hbalock, iflags); -	if (!list_empty(&phba->active_rrq_list)) +	if ((!list_empty(&phba->active_rrq_list)) && +	    (!(phba->pport->load_flag & FC_UNLOADING)))  		mod_timer(&phba->rrq_tmr, next_time);  	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {  		list_del(&rrq->list); @@ -790,7 +814,9 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)  		list_del(&rrq->list);  		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);  	} -	if (!list_empty(&phba->active_rrq_list)) +	if ((!list_empty(&phba->active_rrq_list)) && +	    (!(phba->pport->load_flag & FC_UNLOADING))) +  		mod_timer(&phba->rrq_tmr, next_time);  } @@ -811,7 +837,9 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,  {  	if (!ndlp)  		return 0; -	if (test_bit(xritag, ndlp->active_rrqs.xri_bitmap)) +	if (!ndlp->active_rrqs_xri_bitmap) +		return 0; +	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))  			return 1;  	else  		return 0; @@ -861,7 +889,10 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,  	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))  		goto out; -	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap)) +	if (!ndlp->active_rrqs_xri_bitmap) +		goto out; + +	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))  		goto out;  	spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -1316,7 +1347,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,  	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&  	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) && -	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) { +	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) && +	 (!(piocb->vport->load_flag & FC_UNLOADING))) {  		if (!piocb->vport)  			BUG();  		else @@ -3500,14 +3532,27 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)  	/* Error everything on txq and txcmplq  	 * First do the txq.  	 
*/ -	spin_lock_irq(&phba->hbalock); -	list_splice_init(&pring->txq, &completions); +	if (phba->sli_rev >= LPFC_SLI_REV4) { +		spin_lock_irq(&pring->ring_lock); +		list_splice_init(&pring->txq, &completions); +		pring->txq_cnt = 0; +		spin_unlock_irq(&pring->ring_lock); -	/* Next issue ABTS for everything on the txcmplq */ -	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) -		lpfc_sli_issue_abort_iotag(phba, pring, iocb); +		spin_lock_irq(&phba->hbalock); +		/* Next issue ABTS for everything on the txcmplq */ +		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) +			lpfc_sli_issue_abort_iotag(phba, pring, iocb); +		spin_unlock_irq(&phba->hbalock); +	} else { +		spin_lock_irq(&phba->hbalock); +		list_splice_init(&pring->txq, &completions); +		pring->txq_cnt = 0; -	spin_unlock_irq(&phba->hbalock); +		/* Next issue ABTS for everything on the txcmplq */ +		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) +			lpfc_sli_issue_abort_iotag(phba, pring, iocb); +		spin_unlock_irq(&phba->hbalock); +	}  	/* Cancel all the IOCBs from the completions list */  	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, @@ -3515,6 +3560,36 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)  }  /** + * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function aborts all iocbs in FCP rings and frees all the iocb + * objects in txq. This function issues an abort iocb for all the iocb commands + * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before + * the return of this function. The caller is not required to hold any locks. + **/ +void +lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) +{ +	struct lpfc_sli *psli = &phba->sli; +	struct lpfc_sli_ring  *pring; +	uint32_t i; + +	/* Look on all the FCP Rings for the iotag */ +	if (phba->sli_rev >= LPFC_SLI_REV4) { +		for (i = 0; i < phba->cfg_fcp_io_channel; i++) { +			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; +			lpfc_sli_abort_iocb_ring(phba, pring); +		} +	} else { +		pring = &psli->ring[psli->fcp_ring]; +		lpfc_sli_abort_iocb_ring(phba, pring); +	} +} + + +/**   * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring   * @phba: Pointer to HBA context object.   
* @@ -3531,28 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)  	LIST_HEAD(txcmplq);  	struct lpfc_sli *psli = &phba->sli;  	struct lpfc_sli_ring  *pring; - -	/* Currently, only one fcp ring */ -	pring = &psli->ring[psli->fcp_ring]; +	uint32_t i;  	spin_lock_irq(&phba->hbalock); -	/* Retrieve everything on txq */ -	list_splice_init(&pring->txq, &txq); - -	/* Retrieve everything on the txcmplq */ -	list_splice_init(&pring->txcmplq, &txcmplq); -  	/* Indicate the I/O queues are flushed */  	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;  	spin_unlock_irq(&phba->hbalock); -	/* Flush the txq */ -	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, -			      IOERR_SLI_DOWN); +	/* Look on all the FCP Rings for the iotag */ +	if (phba->sli_rev >= LPFC_SLI_REV4) { +		for (i = 0; i < phba->cfg_fcp_io_channel; i++) { +			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS]; + +			spin_lock_irq(&pring->ring_lock); +			/* Retrieve everything on txq */ +			list_splice_init(&pring->txq, &txq); +			/* Retrieve everything on the txcmplq */ +			list_splice_init(&pring->txcmplq, &txcmplq); +			pring->txq_cnt = 0; +			pring->txcmplq_cnt = 0; +			spin_unlock_irq(&pring->ring_lock); + +			/* Flush the txq */ +			lpfc_sli_cancel_iocbs(phba, &txq, +					      IOSTAT_LOCAL_REJECT, +					      IOERR_SLI_DOWN); +			/* Flush the txcmpq */ +			lpfc_sli_cancel_iocbs(phba, &txcmplq, +					      IOSTAT_LOCAL_REJECT, +					      IOERR_SLI_DOWN); +		} +	} else { +		pring = &psli->ring[psli->fcp_ring]; -	/* Flush the txcmpq */ -	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, -			      IOERR_SLI_DOWN); +		spin_lock_irq(&phba->hbalock); +		/* Retrieve everything on txq */ +		list_splice_init(&pring->txq, &txq); +		/* Retrieve everything on the txcmplq */ +		list_splice_init(&pring->txcmplq, &txcmplq); +		pring->txq_cnt = 0; +		pring->txcmplq_cnt = 0; +		spin_unlock_irq(&phba->hbalock); + +		/* Flush the txq */ +		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT, +				      IOERR_SLI_DOWN); +		/* Flush the txcmpq */ +		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT, +				      IOERR_SLI_DOWN); +	}  }  /** @@ -3955,12 +4057,13 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)  {  	struct lpfc_sli *psli = &phba->sli;  	uint16_t cfg_value; -	int rc; +	int rc = 0;  	/* Reset HBA */  	lpfc_printf_log(phba, KERN_INFO, LOG_SLI, -			"0295 Reset HBA Data: x%x x%x\n", -			phba->pport->port_state, psli->sli_flag); +			"0295 Reset HBA Data: x%x x%x x%x\n", +			phba->pport->port_state, psli->sli_flag, +			phba->hba_flag);  	/* perform board reset */  	phba->fc_eventTag = 0; @@ -3973,6 +4076,12 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)  	phba->fcf.fcf_flag = 0;  	spin_unlock_irq(&phba->hbalock); +	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */ +	if (phba->hba_flag & HBA_FW_DUMP_OP) { +		phba->hba_flag &= ~HBA_FW_DUMP_OP; +		return rc; +	} +  	/* Now physically reset the device */  	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,  			"0389 Performing PCI function reset!\n"); @@ -4969,12 +5078,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)  					     LPFC_QUEUE_REARM);  		} while (++fcp_eqidx < phba->cfg_fcp_io_channel);  	} + +	if (phba->cfg_fof) +		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM); +  	if (phba->sli4_hba.hba_eq) {  		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;  		     fcp_eqidx++)  			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],  					     LPFC_QUEUE_REARM);  	} + +	if (phba->cfg_fof) +		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, 
LPFC_QUEUE_REARM);  }  /** @@ -6566,6 +6682,108 @@ lpfc_mbox_timeout(unsigned long ptr)  	return;  } +/** + * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions + *                                    are pending + * @phba: Pointer to HBA context object. + * + * This function checks if any mailbox completions are present on the mailbox + * completion queue. + **/ +bool +lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba) +{ + +	uint32_t idx; +	struct lpfc_queue *mcq; +	struct lpfc_mcqe *mcqe; +	bool pending_completions = false; + +	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) +		return false; + +	/* Check for completions on mailbox completion queue */ + +	mcq = phba->sli4_hba.mbx_cq; +	idx = mcq->hba_index; +	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) { +		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe; +		if (bf_get_le32(lpfc_trailer_completed, mcqe) && +		    (!bf_get_le32(lpfc_trailer_async, mcqe))) { +			pending_completions = true; +			break; +		} +		idx = (idx + 1) % mcq->entry_count; +		if (mcq->hba_index == idx) +			break; +	} +	return pending_completions; + +} + +/** + * lpfc_sli4_process_missed_mbox_completions - process mbox completions + *					      that were missed. + * @phba: Pointer to HBA context object. + * + * For sli4, it is possible to miss an interrupt. As such mbox completions + * maybe missed causing erroneous mailbox timeouts to occur. This function + * checks to see if mbox completions are on the mailbox completion queue + * and will process all the completions associated with the eq for the + * mailbox completion queue. + **/ +bool +lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) +{ + +	uint32_t eqidx; +	struct lpfc_queue *fpeq = NULL; +	struct lpfc_eqe *eqe; +	bool mbox_pending; + +	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4)) +		return false; + +	/* Find the eq associated with the mcq */ + +	if (phba->sli4_hba.hba_eq) +		for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++) +			if (phba->sli4_hba.hba_eq[eqidx]->queue_id == +			    phba->sli4_hba.mbx_cq->assoc_qid) { +				fpeq = phba->sli4_hba.hba_eq[eqidx]; +				break; +			} +	if (!fpeq) +		return false; + +	/* Turn off interrupts from this EQ */ + +	lpfc_sli4_eq_clr_intr(fpeq); + +	/* Check to see if a mbox completion is pending */ + +	mbox_pending = lpfc_sli4_mbox_completions_pending(phba); + +	/* +	 * If a mbox completion is pending, process all the events on EQ +	 * associated with the mbox completion queue (this could include +	 * mailbox commands, async events, els commands, receive queue data +	 * and fcp commands) +	 */ + +	if (mbox_pending) +		while ((eqe = lpfc_sli4_eq_get(fpeq))) { +			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx); +			fpeq->EQ_processed++; +		} + +	/* Always clear and re-arm the EQ */ + +	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM); + +	return mbox_pending; + +}  /**   * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout @@ -6581,7 +6799,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)  	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;  	MAILBOX_t *mb = &pmbox->u.mb;  	struct lpfc_sli *psli = &phba->sli; -	struct lpfc_sli_ring *pring; + +	/* If the mailbox completed, process the completion and return */ +	if (lpfc_sli4_process_missed_mbox_completions(phba)) +		return;  	/* Check the pmbox pointer first.  
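The pending-completion check in lpfc_sli4_mbox_completions_pending above walks the mailbox CQ as a ring buffer, starting at hba_index and stopping at the first invalid entry or after a full wrap. A minimal standalone sketch of that wrap-around scan — the flag bits below stand in for the lpfc_cqe_valid, lpfc_trailer_completed, and lpfc_trailer_async tests and are not the driver's actual layout:

#include <stdbool.h>
#include <stdint.h>

struct cqe { uint32_t flags; };
#define CQE_VALID	0x1
#define CQE_COMPLETED	0x2
#define CQE_ASYNC	0x4

struct cq {
	struct cqe *qe;
	uint32_t entry_count;
	uint32_t hba_index;	/* next slot the driver would consume */
};

/* Scan from hba_index for a completed, non-async entry; stop on the first
 * invalid entry or once the index wraps back to where the scan began. */
static bool completions_pending(const struct cq *q)
{
	uint32_t idx = q->hba_index;

	while (q->qe[idx].flags & CQE_VALID) {
		if ((q->qe[idx].flags & CQE_COMPLETED) &&
		    !(q->qe[idx].flags & CQE_ASYNC))
			return true;
		idx = (idx + 1) % q->entry_count;
		if (idx == q->hba_index)	/* wrapped all the way around */
			break;
	}
	return false;
}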
There is a race condition  	 * between the mbox timeout handler getting executed in the @@ -6619,8 +6840,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)  	psli->sli_flag &= ~LPFC_SLI_ACTIVE;  	spin_unlock_irq(&phba->hbalock); -	pring = &psli->ring[psli->fcp_ring]; -	lpfc_sli_abort_iocb_ring(phba, pring); +	lpfc_sli_abort_fcp_rings(phba);  	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,  			"0345 Resetting board due to mailbox timeout\n"); @@ -7077,6 +7297,10 @@ lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)  						1000) + jiffies;  	spin_unlock_irq(&phba->hbalock); +	/* Make sure the mailbox is really active */ +	if (timeout) +		lpfc_sli4_process_missed_mbox_completions(phba); +  	/* Wait for the outstnading mailbox command to complete */  	while (phba->sli.mbox_active) {  		/* Check active mailbox complete status every 2ms */ @@ -7920,7 +8144,8 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)  	struct lpfc_vector_map_info *cpup;  	int chann, cpu; -	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) { +	if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU +	    && phba->cfg_fcp_io_channel > 1) {  		cpu = smp_processor_id();  		if (cpu < phba->sli4_hba.num_present_cpu) {  			cpup = phba->sli4_hba.cpu_map; @@ -7983,6 +8208,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,  	abort_tag = (uint32_t) iocbq->iotag;  	xritag = iocbq->sli4_xritag;  	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ +	wqe->generic.wqe_com.word10 = 0;  	/* words0-2 bpl convert bde */  	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {  		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / @@ -8076,6 +8302,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,  		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);  		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);  		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); +		wqe->els_req.max_response_payload_len = total_len - xmit_len;  		break;  	case CMD_XMIT_SEQUENCE64_CX:  		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, @@ -8120,8 +8347,10 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,  		command_type = FCP_COMMAND_DATA_OUT;  		/* word3 iocb=iotag wqe=payload_offset_len */  		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */ -		wqe->fcp_iwrite.payload_offset_len = -			xmit_len + sizeof(struct fcp_rsp); +		bf_set(payload_offset_len, &wqe->fcp_iwrite, +		       xmit_len + sizeof(struct fcp_rsp)); +		bf_set(cmd_buff_len, &wqe->fcp_iwrite, +		       0);  		/* word4 iocb=parameter wqe=total_xfer_length memcpy */  		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */  		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, @@ -8135,12 +8364,22 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,  		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);  		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);  		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); +		if (iocbq->iocb_flag & LPFC_IO_OAS) { +			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1); +			if (phba->cfg_XLanePriority) { +				bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1); +				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com, +				       (phba->cfg_XLanePriority << 1)); +			} +		}  		break;  	case CMD_FCP_IREAD64_CR:  		/* word3 iocb=iotag wqe=payload_offset_len */  		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */ -		wqe->fcp_iread.payload_offset_len = -			xmit_len + sizeof(struct fcp_rsp); +		bf_set(payload_offset_len, &wqe->fcp_iread, +		       xmit_len + 
sizeof(struct fcp_rsp)); +		bf_set(cmd_buff_len, &wqe->fcp_iread, +		       0);  		/* word4 iocb=parameter wqe=total_xfer_length memcpy */  		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */  		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, @@ -8154,10 +8393,23 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,  		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);  		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);  		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); +		if (iocbq->iocb_flag & LPFC_IO_OAS) { +			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1); +			if (phba->cfg_XLanePriority) { +				bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1); +				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com, +				       (phba->cfg_XLanePriority << 1)); +			} +		}  		break;  	case CMD_FCP_ICMND64_CR: +		/* word3 iocb=iotag wqe=payload_offset_len */ +		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */ +		bf_set(payload_offset_len, &wqe->fcp_icmd, +		       xmit_len + sizeof(struct fcp_rsp)); +		bf_set(cmd_buff_len, &wqe->fcp_icmd, +		       0);  		/* word3 iocb=IO_TAG wqe=reserved */ -		wqe->fcp_icmd.rsrvd3 = 0;  		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);  		/* Always open the exchange */  		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); @@ -8169,6 +8421,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,  		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);  		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,  		       iocbq->iocb.ulpFCP2Rcvy); +		if (iocbq->iocb_flag & LPFC_IO_OAS) { +			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1); +			if (phba->cfg_XLanePriority) { +				bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1); +				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com, +				       (phba->cfg_XLanePriority << 1)); +			} +		}  		break;  	case CMD_GEN_REQUEST64_CR:  		/* For this command calculate the xmit length of the @@ -8203,6 +8463,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,  		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);  		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);  		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); +		wqe->gen_req.max_response_payload_len = total_len - xmit_len;  		command_type = OTHER_COMMAND;  		break;  	case CMD_XMIT_ELS_RSP64_CX: @@ -8400,6 +8661,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,  {  	struct lpfc_sglq *sglq;  	union lpfc_wqe wqe; +	struct lpfc_queue *wq;  	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];  	if (piocb->sli4_xritag == NO_XRI) { @@ -8452,11 +8714,13 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,  		return IOCB_ERROR;  	if ((piocb->iocb_flag & LPFC_IO_FCP) || -		(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { -		if (unlikely(!phba->sli4_hba.fcp_wq)) -			return IOCB_ERROR; -		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], -				     &wqe)) +	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { +		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) { +			wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx]; +		} else { +			wq = phba->sli4_hba.oas_wq; +		} +		if (lpfc_sli4_wq_put(wq, &wqe))  			return IOCB_ERROR;  	} else {  		if (unlikely(!phba->sli4_hba.els_wq)) @@ -8546,12 +8810,20 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,  	if (phba->sli_rev == LPFC_SLI_REV4) {  		if (piocb->iocb_flag &  LPFC_IO_FCP) { -			if (unlikely(!phba->sli4_hba.fcp_wq)) -				return IOCB_ERROR; -			idx = lpfc_sli4_scmd_to_wqidx_distr(phba); -			piocb->fcp_wqidx = idx; -			ring_number = MAX_SLI3_CONFIGURED_RINGS + 
idx; - +			if (!phba->cfg_fof || (!(piocb->iocb_flag & +				LPFC_IO_OAS))) { +				if (unlikely(!phba->sli4_hba.fcp_wq)) +					return IOCB_ERROR; +				idx = lpfc_sli4_scmd_to_wqidx_distr(phba); +				piocb->fcp_wqidx = idx; +				ring_number = MAX_SLI3_CONFIGURED_RINGS + idx; +			} else { +				if (unlikely(!phba->sli4_hba.oas_wq)) +					return IOCB_ERROR; +				idx = 0; +				piocb->fcp_wqidx = 0; +				ring_number = LPFC_FCP_OAS_RING; +			}  			pring = &phba->sli.ring[ring_number];  			spin_lock_irqsave(&pring->ring_lock, iflags);  			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, @@ -8973,6 +9245,7 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)  		pring->sli.sli3.next_cmdidx  = 0;  		pring->sli.sli3.local_getidx = 0;  		pring->sli.sli3.cmdidx = 0; +		pring->flag = 0;  		INIT_LIST_HEAD(&pring->txq);  		INIT_LIST_HEAD(&pring->txcmplq);  		INIT_LIST_HEAD(&pring->iocb_continueq); @@ -9608,43 +9881,6 @@ abort_iotag_exit:  }  /** - * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring - * @phba: Pointer to HBA context object. - * @pring: Pointer to driver SLI ring object. - * - * This function aborts all iocbs in the given ring and frees all the iocb - * objects in txq. This function issues abort iocbs unconditionally for all - * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed - * to complete before the return of this function. The caller is not required - * to hold any locks. - **/ -static void -lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) -{ -	LIST_HEAD(completions); -	struct lpfc_iocbq *iocb, *next_iocb; - -	if (pring->ringno == LPFC_ELS_RING) -		lpfc_fabric_abort_hba(phba); - -	spin_lock_irq(&phba->hbalock); - -	/* Take off all the iocbs on txq for cancelling */ -	list_splice_init(&pring->txq, &completions); -	pring->txq_cnt = 0; - -	/* Next issue ABTS for everything on the txcmplq */ -	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) -		lpfc_sli_abort_iotag_issue(phba, pring, iocb); - -	spin_unlock_irq(&phba->hbalock); - -	/* Cancel all the IOCBs from the completions list */ -	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, -			      IOERR_SLI_ABORTED); -} - -/**   * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.   * @phba: pointer to lpfc HBA data structure.   * @@ -9659,7 +9895,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)  	for (i = 0; i < psli->num_rings; i++) {  		pring = &psli->ring[i]; -		lpfc_sli_iocb_ring_abort(phba, pring); +		lpfc_sli_abort_iocb_ring(phba, pring);  	}  } 
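The routing change above is the core of the OAS plumbing: when cfg_fof is off or the request is not tagged LPFC_IO_OAS, FCP I/O keeps being spread over the per-channel fcp_wq array via fcp_wqidx; otherwise it is steered to the single oas_wq and the dedicated LPFC_FCP_OAS_RING. Below is a compilable toy model of that decision only; the queue names and channel count are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define IO_OAS		0x10000	/* models LPFC_IO_OAS in iocb_flag */
#define NUM_FCP_WQ	4	/* hypothetical per-channel queue count */

struct wq { const char *name; };

static struct wq fcp_wq[NUM_FCP_WQ] = {
	{ "fcp_wq0" }, { "fcp_wq1" }, { "fcp_wq2" }, { "fcp_wq3" }
};
static struct wq oas_wq = { "oas_wq" };

/* Ordinary FCP I/O is distributed over the per-channel queues; I/O
 * carrying the OAS flag goes to the dedicated queue when the fof
 * feature is configured, mirroring __lpfc_sli_issue_iocb_s4. */
static struct wq *select_wq(int cfg_fof, uint32_t iocb_flag, unsigned int idx)
{
	if (!cfg_fof || !(iocb_flag & IO_OAS))
		return &fcp_wq[idx % NUM_FCP_WQ];
	return &oas_wq;
}

int main(void)
{
	printf("%s\n", select_wq(1, 0, 2)->name);	/* fcp_wq2 */
	printf("%s\n", select_wq(1, IO_OAS, 2)->name);	/* oas_wq */
	return 0;
}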
@@ -9884,6 +10120,124 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,  }  /** + * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN + * @vport: Pointer to virtual port. + * @pring: Pointer to driver SLI ring object. + * @tgt_id: SCSI ID of the target. + * @lun_id: LUN ID of the scsi device. + * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST. + * + * This function sends an abort command for every SCSI command + * associated with the given virtual port pending on the ring + * filtered by the lpfc_sli_validate_fcp_iocb function. + * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the + * FCP iocbs associated with lun specified by tgt_id and lun_id + * parameters. + * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the + * FCP iocbs associated with SCSI target specified by tgt_id parameter. + * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all + * FCP iocbs associated with virtual port. + * This function returns the number of iocbs it aborted. + * This function is called with no locks held right after a taskmgmt + * command is sent. + **/ +int +lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, +			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) +{ +	struct lpfc_hba *phba = vport->phba; +	struct lpfc_iocbq *abtsiocbq; +	struct lpfc_iocbq *iocbq; +	IOCB_t *icmd; +	int sum, i, ret_val; +	unsigned long iflags; +	struct lpfc_sli_ring *pring_s4; +	uint32_t ring_number; + +	spin_lock_irq(&phba->hbalock); + +	/* all I/Os are in process of being flushed */ +	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { +		spin_unlock_irq(&phba->hbalock); +		return 0; +	} +	sum = 0; + +	for (i = 1; i <= phba->sli.last_iotag; i++) { +		iocbq = phba->sli.iocbq_lookup[i]; + +		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id, +					       cmd) != 0) +			continue; + +		/* +		 * If the iocbq is already being aborted, don't take a second +		 * action, but do count it. +		 */ +		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) +			continue; + +		/* issue ABTS for this IOCB based on iotag */ +		abtsiocbq = __lpfc_sli_get_iocbq(phba); +		if (abtsiocbq == NULL) +			continue; + +		icmd = &iocbq->iocb; +		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; +		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext; +		if (phba->sli_rev == LPFC_SLI_REV4) +			abtsiocbq->iocb.un.acxri.abortIoTag = +							 iocbq->sli4_xritag; +		else +			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag; +		abtsiocbq->iocb.ulpLe = 1; +		abtsiocbq->iocb.ulpClass = icmd->ulpClass; +		abtsiocbq->vport = vport; + +		/* ABTS WQE must go to the same WQ as the WQE to be aborted */ +		abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx; +		if (iocbq->iocb_flag & LPFC_IO_FCP) +			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX; + +		if (lpfc_is_link_up(phba)) +			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN; +		else +			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN; + +		/* Setup callback routine and issue the command. */ +		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; + +		/* +		 * Indicate the IO is being aborted by the driver and set +		 * the caller's flag into the aborted IO. +		 */ +		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; + +		if (phba->sli_rev == LPFC_SLI_REV4) { +			ring_number = MAX_SLI3_CONFIGURED_RINGS + +					 iocbq->fcp_wqidx; +			pring_s4 = &phba->sli.ring[ring_number]; +			/* Note: both hbalock and ring_lock must be set here */ +			spin_lock_irqsave(&pring_s4->ring_lock, iflags); +			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, +							abtsiocbq, 0); +			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags); +		} else { +			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno, +							abtsiocbq, 0); +		} + + +		if (ret_val == IOCB_ERROR) +			__lpfc_sli_release_iocbq(phba, abtsiocbq); +		else +			sum++; +	} +	spin_unlock_irq(&phba->hbalock); +	return sum; +} + 
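lpfc_sli_abort_taskmgmt() above has one guard that is easy to miss: an I/O already marked LPFC_DRIVER_ABORTED is skipped so a second ABTS is never queued for the same exchange, and only successfully queued aborts are counted. A reduced user-space sketch of that walk follows, with a hypothetical fixed-size command table standing in for the driver's iocbq_lookup array.

#include <stdint.h>
#include <stdio.h>

#define DRIVER_ABORTED	0x01	/* models LPFC_DRIVER_ABORTED */

struct io_cmd {
	int in_use;
	int target_id;
	uint32_t flags;
};

/* Mark every outstanding command for 'target_id' as aborted, skipping
 * entries that already have an abort in flight, and return how many
 * new aborts were issued -- the skeleton of lpfc_sli_abort_taskmgmt. */
static int abort_target(struct io_cmd *tbl, int count, int target_id)
{
	int i, sum = 0;

	for (i = 0; i < count; i++) {
		if (!tbl[i].in_use || tbl[i].target_id != target_id)
			continue;
		if (tbl[i].flags & DRIVER_ABORTED)
			continue;	/* abort already in flight */
		tbl[i].flags |= DRIVER_ABORTED;
		sum++;			/* a real driver queues an ABTS here */
	}
	return sum;
}

int main(void)
{
	struct io_cmd tbl[3] = {
		{ 1, 5, 0 }, { 1, 5, DRIVER_ABORTED }, { 1, 7, 0 }
	};

	printf("issued %d aborts\n", abort_target(tbl, 3, 5));	/* prints 1 */
	return 0;
}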
+/**   * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler   * @phba: Pointer to HBA context object.   * @cmdiocbq: Pointer to command iocb. @@ -10073,6 +10427,11 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,  		if (iocb_completed) {  			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,  					"0331 IOCB wake signaled\n"); +			/* Note: we are not indicating if the IOCB has a success +			 * status or not - that's for the caller to check. +			 * IOCB_SUCCESS means just that the command was sent and +			 * completed. Not that it completed successfully. +			 */  		} else if (timeleft == 0) {  			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,  					"0338 IOCB wait timeout error - no " @@ -11074,8 +11433,11 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,  			      struct lpfc_iocbq *pIocbOut,  			      struct lpfc_wcqe_complete *wcqe)  { +	int numBdes, i;  	unsigned long iflags; -	uint32_t status; +	uint32_t status, max_response; +	struct lpfc_dmabuf *dmabuf; +	struct ulp_bde64 *bpl, bde;  	size_t offset = offsetof(struct lpfc_iocbq, iocb);  	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset, @@ -11092,7 +11454,36 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,  			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;  	else {  		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter; -		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed; +		switch (pIocbOut->iocb.ulpCommand) { +		case CMD_ELS_REQUEST64_CR: +			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; +			bpl  = (struct ulp_bde64 *)dmabuf->virt; +			bde.tus.w = le32_to_cpu(bpl[1].tus.w); +			max_response = bde.tus.f.bdeSize; +			break; +		case CMD_GEN_REQUEST64_CR: +			max_response = 0; +			if (!pIocbOut->context3) +				break; +			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/ +					sizeof(struct ulp_bde64); +			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3; +			bpl = (struct ulp_bde64 *)dmabuf->virt; +			for (i = 0; i < numBdes; i++) { +				bde.tus.w = le32_to_cpu(bpl[i].tus.w); +				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64) +					max_response += bde.tus.f.bdeSize; +			} +			break; +		default: +			max_response = wcqe->total_data_placed; +			break; +		} +		if (max_response < wcqe->total_data_placed) +			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response; +		else +			pIocbIn->iocb.un.genreq64.bdl.bdeSize = +				wcqe->total_data_placed;  	}  	/* Convert BG errors for completion status */ @@ -11972,6 +12363,175 @@ lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)  	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);  } + 
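The max_response logic added to lpfc_sli4_iocb_param_transfer() above is a defensive clamp: the driver sums the sizes of the response-capable buffer descriptors and never reports more placed data than those buffers could hold, even if the completion entry claims otherwise. Below is a stand-alone model of the clamp under simplified assumptions -- descriptor flags are reduced to a two-value type field invented for the example.

#include <stdint.h>
#include <stdio.h>

#define BDE_TYPE_CMD	1	/* hypothetical: command buffer */
#define BDE_TYPE_DATA	0	/* hypothetical: response-capable buffer */

struct bde {
	int type;
	uint32_t size;
};

/* Sum the response-capable descriptor sizes, then clamp the byte count
 * reported by the hardware so a corrupt completion can never claim more
 * data than the posted buffers could hold. */
static uint32_t clamp_placed(const struct bde *bpl, int n, uint32_t placed)
{
	uint32_t max_response = 0;
	int i;

	for (i = 0; i < n; i++)
		if (bpl[i].type != BDE_TYPE_CMD)
			max_response += bpl[i].size;

	return placed < max_response ? placed : max_response;
}

int main(void)
{
	struct bde bpl[2] = { { BDE_TYPE_CMD, 64 }, { BDE_TYPE_DATA, 1024 } };

	printf("%u\n", clamp_placed(bpl, 2, 512));	/* 512: fits */
	printf("%u\n", clamp_placed(bpl, 2, 4096));	/* 1024: clamped */
	return 0;
}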
+/** + * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue + *			     entry + * @phba: Pointer to HBA context object. + * @eqe: Pointer to fast-path event queue entry. + * + * This routine processes an event queue entry from the Flash Optimized Fabric + * event queue.  It will check the MajorCode and MinorCode to determine whether + * this is for a completion event on a completion queue; if not, an error shall + * be logged and the routine will just return. Otherwise, it will get to the + * corresponding completion queue and process all the entries on the completion + * queue, rearm the completion queue, and then return. + **/ +static void +lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) +{ +	struct lpfc_queue *cq; +	struct lpfc_cqe *cqe; +	bool workposted = false; +	uint16_t cqid; +	int ecount = 0; + +	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) { +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"9147 Not a valid completion " +				"event: majorcode=x%x, minorcode=x%x\n", +				bf_get_le32(lpfc_eqe_major_code, eqe), +				bf_get_le32(lpfc_eqe_minor_code, eqe)); +		return; +	} + +	/* Get the reference to the corresponding CQ */ +	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + +	/* Next check for OAS */ +	cq = phba->sli4_hba.oas_cq; +	if (unlikely(!cq)) { +		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) +			lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +					"9148 OAS completion queue " +					"does not exist\n"); +		return; +	} + +	if (unlikely(cqid != cq->queue_id)) { +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"9149 Miss-matched fast-path compl " +				"queue id: eqcqid=%d, fcpcqid=%d\n", +				cqid, cq->queue_id); +		return; +	} + +	/* Process all the entries to the OAS CQ */ +	while ((cqe = lpfc_sli4_cq_get(cq))) { +		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); +		if (!(++ecount % cq->entry_repost)) +			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); +	} + +	/* Track the max number of CQEs processed in 1 EQ */ +	if (ecount > cq->CQ_max_cqe) +		cq->CQ_max_cqe = ecount; + +	/* Catch the no cq entry condition */ +	if (unlikely(ecount == 0)) +		lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +				"9153 No entry from fast-path completion " +				"queue fcpcqid=%d\n", cq->queue_id); + +	/* In any case, flush and re-arm the CQ */ +	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM); + +	/* wake up worker thread if there is work to be done */ +	if (workposted) +		lpfc_worker_wake_up(phba); +} + 
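Both the CQ drain above and the EQ drain in the interrupt handler that follows use the same batching idiom: consume entries, return a batch to the hardware every entry_repost entries without re-arming, then issue one final release that re-arms the interrupt. A compilable sketch of the idiom, with a simple counter standing in for the hardware queue (all names here are invented for the illustration):

#include <stdio.h>

#define ENTRY_REPOST	8	/* ack to hardware every 8 entries */

/* Toy queue: 'available' counts unread entries, 'acked' counts entries
 * handed back to the hardware, 'armed' says the interrupt is re-armed. */
struct cq {
	int available;
	int acked;
	int armed;
};

static int cq_get(struct cq *q)
{
	if (!q->available)
		return 0;
	q->available--;
	return 1;
}

static void cq_release(struct cq *q, int pending, int rearm)
{
	q->acked += pending;
	q->armed = rearm;
}

/* Drain the queue, acking in batches so the hardware can recycle entries
 * early, then do a final release that also re-arms the interrupt. */
static int drain(struct cq *q)
{
	int ecount = 0;

	while (cq_get(q)) {
		/* ... process the entry here ... */
		if (!(++ecount % ENTRY_REPOST))
			cq_release(q, ENTRY_REPOST, 0);	/* batch ack, no arm */
	}
	cq_release(q, ecount % ENTRY_REPOST, 1);	/* final ack + re-arm */
	return ecount;
}

int main(void)
{
	struct cq q = { 19, 0, 0 };
	int n = drain(&q);

	printf("processed %d, acked %d, armed %d\n", n, q.acked, q.armed);
	return 0;	/* prints: processed 19, acked 19, armed 1 */
}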
+/** + * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device + * @irq: Interrupt number. + * @dev_id: The device context pointer. + * + * This function is directly called from the PCI layer as an interrupt + * service routine when device with SLI-4 interface spec is enabled with + * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric + * IOCB ring event in the HBA. However, when the device is enabled with either + * MSI or Pin-IRQ interrupt mode, this function is called as part of the + * device-level interrupt handler. When the PCI slot is in error recovery + * or the HBA is undergoing initialization, the interrupt handler will not + * process the interrupt. The Flash Optimized Fabric ring events are handled in + * the interrupt context. This function is called without any lock held. + * It gets the hbalock to access and update SLI data structures. Note that + * the EQ to CQ mapping is one-to-one, such that the EQ index is + * equal to the CQ index. + * + * This function returns IRQ_HANDLED when interrupt is handled else it + * returns IRQ_NONE. + **/ +irqreturn_t +lpfc_sli4_fof_intr_handler(int irq, void *dev_id) +{ +	struct lpfc_hba *phba; +	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; +	struct lpfc_queue *eq; +	struct lpfc_eqe *eqe; +	unsigned long iflag; +	int ecount = 0; +	uint32_t eqidx; + +	/* Get the driver's phba structure from the dev_id */ +	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id; +	phba = fcp_eq_hdl->phba; +	eqidx = fcp_eq_hdl->idx; + +	if (unlikely(!phba)) +		return IRQ_NONE; + +	/* Get to the EQ struct associated with this vector */ +	eq = phba->sli4_hba.fof_eq; +	if (unlikely(!eq)) +		return IRQ_NONE; + +	/* Check device state for handling interrupt */ +	if (unlikely(lpfc_intr_state_check(phba))) { +		eq->EQ_badstate++; +		/* Check again for link_state with lock held */ +		spin_lock_irqsave(&phba->hbalock, iflag); +		if (phba->link_state < LPFC_LINK_DOWN) +			/* Flush, clear interrupt, and rearm the EQ */ +			lpfc_sli4_eq_flush(phba, eq); +		spin_unlock_irqrestore(&phba->hbalock, iflag); +		return IRQ_NONE; +	} + +	/* +	 * Process all the events on the FCP fast-path EQ +	 */ +	while ((eqe = lpfc_sli4_eq_get(eq))) { +		lpfc_sli4_fof_handle_eqe(phba, eqe); +		if (!(++ecount % eq->entry_repost)) +			lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM); +		eq->EQ_processed++; +	} + +	/* Track the max number of EQEs processed in 1 intr */ +	if (ecount > eq->EQ_max_eqe) +		eq->EQ_max_eqe = ecount; + + +	if (unlikely(ecount == 0)) { +		eq->EQ_no_entry++; + +		if (phba->intr_type == MSIX) +			/* MSI-X treated interrupt served as no EQ share INT */ +			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, +					"9145 MSI-X interrupt with no EQE\n"); +		else { +			lpfc_printf_log(phba, KERN_ERR, LOG_SLI, +					"9146 ISR interrupt with no EQE\n"); +			/* Non MSI-X treated on interrupt as EQ share INT */ +			return IRQ_NONE; +		} +	} +	/* Always clear and re-arm the fast-path EQ */ +	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM); +	return IRQ_HANDLED; +} +  /**   * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device   * @irq: Interrupt number. @@ -12127,6 +12687,13 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)  			hba_handled |= true;  	} +	if (phba->cfg_fof) { +		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq, +					&phba->sli4_hba.fcp_eq_hdl[0]); +		if (hba_irq_rc == IRQ_HANDLED) +			hba_handled |= true; +	} +  	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;  } /* lpfc_sli4_intr_handler */ @@ -15098,6 +15665,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)  	uint16_t max_rpi, rpi_limit;  	uint16_t rpi_remaining, lrpi = 0;  	struct lpfc_rpi_hdr *rpi_hdr; +	unsigned long iflag;  	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;  	rpi_limit = phba->sli4_hba.next_rpi; @@ -15106,7 +15674,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)  	 * Fetch the next logical rpi.  Because this index is logical,  	 * the  driver starts at 0 each time.  	 */ -	spin_lock_irq(&phba->hbalock); +	spin_lock_irqsave(&phba->hbalock, iflag);  	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);  	if (rpi >= rpi_limit)  		rpi = LPFC_RPI_ALLOC_ERROR; @@ -15122,7 +15690,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)  	 */  	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&  	    (phba->sli4_hba.rpi_count >= max_rpi)) { -		spin_unlock_irq(&phba->hbalock); +		spin_unlock_irqrestore(&phba->hbalock, iflag);  		return rpi;  	} @@ -15131,7 +15699,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)  	 * extents.  	 
*/  	if (!phba->sli4_hba.rpi_hdrs_in_use) { -		spin_unlock_irq(&phba->hbalock); +		spin_unlock_irqrestore(&phba->hbalock, iflag);  		return rpi;  	} @@ -15142,7 +15710,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)  	 * how many are supported max by the device.  	 */  	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count; -	spin_unlock_irq(&phba->hbalock); +	spin_unlock_irqrestore(&phba->hbalock, iflag);  	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {  		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);  		if (!rpi_hdr) { @@ -16383,7 +16951,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)  {  	LIST_HEAD(completions);  	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING]; -	struct lpfc_iocbq *piocbq = 0; +	struct lpfc_iocbq *piocbq = NULL;  	unsigned long iflags = 0;  	char *fail_msg = NULL;  	struct lpfc_sglq *sglq; diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 6b0f2478706..edb48832c39 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   *                                                                 * @@ -78,6 +78,8 @@ struct lpfc_iocbq {  #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */  #define LPFC_FIP_ELS_ID_SHIFT	14 +#define LPFC_IO_OAS		0x10000 /* OAS FCP IO */ +  	uint32_t drvrTimeout;	/* driver timeout in seconds */  	uint32_t fcp_wqidx;	/* index to FCP work queue */  	struct lpfc_vport *vport;/* virtual port pointer */ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 85120b77aa0..7f50aa04d66 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2009-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2009-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        
*   * www.emulex.com                                                  *   *                                                                 * @@ -39,6 +39,10 @@  #define LPFC_FCP_IO_CHAN_MIN       1  #define LPFC_FCP_IO_CHAN_MAX       16 +/* Number of channels used for Flash Optimized Fabric (FOF) operations */ + +#define LPFC_FOF_IO_CHAN_NUM       1 +  /*   * Provide the default FCF Record attributes used by the driver   * when nonFIP mode is configured and there is no other default @@ -399,6 +403,7 @@ struct lpfc_pc_sli4_params {  	uint32_t if_page_sz;  	uint32_t rq_db_window;  	uint32_t loopbk_scope; +	uint32_t oas_supported;  	uint32_t eq_pages_max;  	uint32_t eqe_size;  	uint32_t cq_pages_max; @@ -439,6 +444,8 @@ struct lpfc_sli4_lnk_info {  	uint8_t lnk_no;  }; +#define LPFC_SLI4_HANDLER_CNT		(LPFC_FCP_IO_CHAN_MAX+ \ +					 LPFC_FOF_IO_CHAN_NUM)  #define LPFC_SLI4_HANDLER_NAME_SZ	16  /* Used for IRQ vector to CPU mapping */ @@ -507,7 +514,7 @@ struct lpfc_sli4_hba {  	struct lpfc_register sli_intf;  	struct lpfc_pc_sli4_params pc_sli4_params;  	struct msix_entry *msix_entries; -	uint8_t handler_name[LPFC_FCP_IO_CHAN_MAX][LPFC_SLI4_HANDLER_NAME_SZ]; +	uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];  	struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */  	/* Pointers to the constructed SLI4 queues */ @@ -527,6 +534,17 @@ struct lpfc_sli4_hba {  	uint32_t ulp0_mode;	/* ULP0 protocol mode */  	uint32_t ulp1_mode;	/* ULP1 protocol mode */ +	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */ + +	/* Optimized Access Storage specific queues/structures */ + +	struct lpfc_queue *oas_cq; /* OAS completion queue */ +	struct lpfc_queue *oas_wq; /* OAS Work queue */ +	struct lpfc_sli_ring *oas_ring; +	uint64_t oas_next_lun; +	uint8_t oas_next_tgt_wwpn[8]; +	uint8_t oas_next_vpt_wwpn[8]; +  	/* Setup information for various queue parameters */  	int eq_esize;  	int eq_ecount; @@ -589,6 +607,7 @@ struct lpfc_sli4_hba {  	struct lpfc_vector_map_info *cpu_map;  	uint16_t num_online_cpu;  	uint16_t num_present_cpu; +	uint16_t curr_disp_cpu;  };  enum lpfc_sge_type { @@ -673,6 +692,7 @@ void lpfc_sli4_queue_unset(struct lpfc_hba *);  int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);  int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);  uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *); +void lpfc_sli4_free_xri(struct lpfc_hba *, int);  int lpfc_sli4_post_async_mbox(struct lpfc_hba *);  int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);  struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index f58f18342bc..41675c1193e 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -1,7 +1,7 @@  /*******************************************************************   * This file is part of the Emulex Linux Device Driver for         *   * Fibre Channel Host Bus Adapters.                                * - * Copyright (C) 2004-2013 Emulex.  All rights reserved.           * + * Copyright (C) 2004-2014 Emulex.  All rights reserved.           *   * EMULEX and SLI are trademarks of Emulex.                        *   * www.emulex.com                                                  *   *                                                                 * @@ -18,7 +18,7 @@   * included with this package.                                     
*   *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.42" +#define LPFC_DRIVER_VERSION "10.2.8001.0"  #define LPFC_DRIVER_NAME		"lpfc"  /* Used for SLI 2/3 */ @@ -30,4 +30,4 @@  #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \  		LPFC_DRIVER_VERSION -#define LPFC_COPYRIGHT "Copyright(c) 2004-2013 Emulex.  All rights reserved." +#define LPFC_COPYRIGHT "Copyright(c) 2004-2014 Emulex.  All rights reserved." diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 858075723c8..6a039eb1cbc 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -25,10 +25,6 @@   * 1+ (800) 334-5454   */ -/* - * $Log: mac_NCR5380.c,v $ - */ -  #include <linux/types.h>  #include <linux/stddef.h>  #include <linux/ctype.h> @@ -58,12 +54,6 @@  #include "NCR5380.h" -#if 0 -#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION) -#else -#define NDEBUG (NDEBUG_ABORT) -#endif -  #define RESET_BOOT  #define DRIVER_SETUP @@ -260,6 +250,8 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)      /* Once we support multiple 5380s (e.g. DuoDock) we'll do         something different here */      instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); +    if (instance == NULL) +	return 0;      if (macintosh_config->ident == MAC_MODEL_IIFX) {  	mac_scsi_regp  = via1+0x8000; diff --git a/drivers/scsi/mac_scsi.h b/drivers/scsi/mac_scsi.h index 7dc62fce1c4..06969b06e54 100644 --- a/drivers/scsi/mac_scsi.h +++ b/drivers/scsi/mac_scsi.h @@ -22,10 +22,6 @@   * 1+ (800) 334-5454   */ -/* - * $Log: cumana_NCR5380.h,v $ - */ -  #ifndef MAC_NCR5380_H  #define MAC_NCR5380_H @@ -51,8 +47,6 @@  #include <scsi/scsicam.h> -#ifndef HOSTS_C -  #define NCR5380_implementation_fields \      int port, ctrl @@ -75,10 +69,6 @@  #define NCR5380_show_info macscsi_show_info  #define NCR5380_write_info macscsi_write_info -#define BOARD_NORMAL	0 -#define BOARD_NCR53C400	1 - -#endif /* ndef HOSTS_C */  #endif /* ndef ASM */  #endif /* MAC_NCR5380_H */ diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 90c95a3385d..b7770516f4c 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -531,13 +531,6 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)  	int	target = 0;  	int	ldrv_num = 0;   /* logical drive number */ - -	/* -	 * filter the internal and ioctl commands -	 */ -	if((cmd->cmnd[0] == MEGA_INTERNAL_CMD)) -		return (scb_t *)cmd->host_scribble; -  	/*  	 * We know what channels our logical drives are on - mega_find_card()  	 */ @@ -1439,19 +1432,22 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)  		cmdid = completed[i]; -		if( cmdid == CMDID_INT_CMDS ) { /* internal command */ +		/* +		 * Only free SCBs for the commands coming down from the +		 * mid-layer, not for those which were issued internally +		 * +		 * For internal command, restore the status returned by the +		 * firmware so that user can interpret it. 
+	 */ +	if (cmdid == CMDID_INT_CMDS) {  			scb = &adapter->int_scb; -			cmd = scb->cmd; -			mbox = (mbox_t *)scb->raw_mbox; -			/* -			 * Internal command interface do not fire the extended -			 * passthru or 64-bit passthru -			 */ -			pthru = scb->pthru; +			list_del_init(&scb->list); +			scb->state = SCB_FREE; -		} -		else { +			adapter->int_status = status; +			complete(&adapter->int_waitq); +		} else {  			scb = &adapter->scb_list[cmdid];  			/* @@ -1640,25 +1636,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)  				cmd->result |= (DID_BAD_TARGET << 16)|status;  		} -		/* -		 * Only free SCBs for the commands coming down from the -		 * mid-layer, not for which were issued internally -		 * -		 * For internal command, restore the status returned by the -		 * firmware so that user can interpret it. -		 */ -		if( cmdid == CMDID_INT_CMDS ) { /* internal command */ -			cmd->result = status; - -			/* -			 * Remove the internal command from the pending list -			 */ -			list_del_init(&scb->list); -			scb->state = SCB_FREE; -		} -		else { -			mega_free_scb(adapter, scb); -		} +		mega_free_scb(adapter, scb);  		/* Add Scsi_Command to end of completed queue */  		list_add_tail(SCSI_LIST(cmd), &adapter->completed_list); @@ -4133,23 +4111,15 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,   * The last argument is the address of the passthru structure if the command   * to be fired is a passthru command   * - * lockscope specifies whether the caller has already acquired the lock. Of - * course, the caller must know which lock we are talking about. -   *   * Note: parameter 'pthru' is null for non-passthru commands.   */  static int  mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)  { -	Scsi_Cmnd	*scmd; -	struct	scsi_device *sdev; +	unsigned long flags;  	scb_t	*scb;  	int	rval; -	scmd = scsi_allocate_command(GFP_KERNEL); -	if (!scmd) -		return -ENOMEM; -  	/*  	 * The internal commands share one command id and hence are  	 * serialized. This is so because we want to reserve maximum number of @@ -4160,73 +4130,45 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)  	scb = &adapter->int_scb;  	memset(scb, 0, sizeof(scb_t)); -	sdev = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); -	scmd->device = sdev; - -	memset(adapter->int_cdb, 0, sizeof(adapter->int_cdb)); -	scmd->cmnd = adapter->int_cdb; -	scmd->device->host = adapter->host; -	scmd->host_scribble = (void *)scb; -	scmd->cmnd[0] = MEGA_INTERNAL_CMD; - -	scb->state |= SCB_ACTIVE; -	scb->cmd = scmd; +	scb->idx = CMDID_INT_CMDS; +	scb->state |= SCB_ACTIVE | SCB_PENDQ;  	memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));  	/*  	 * Is it a passthru command  	 */ -	if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) { - +	if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)  		scb->pthru = pthru; -	} - -	scb->idx = CMDID_INT_CMDS; -	megaraid_queue_lck(scmd, mega_internal_done); +	spin_lock_irqsave(&adapter->lock, flags); +	list_add_tail(&scb->list, &adapter->pending_list); +	/* +	 * Check if the HBA is in quiescent state, e.g., during a +	 * delete logical drive operation. If it is, don't run +	 * the pending_list. +	 */ +	if (atomic_read(&adapter->quiescent) == 0) +		mega_runpendq(adapter); +	spin_unlock_irqrestore(&adapter->lock, flags);  	wait_for_completion(&adapter->int_waitq); -	rval = scmd->result; -	mc->status = scmd->result; -	kfree(sdev); +	mc->status = rval = adapter->int_status; 
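The rework above replaces the old fake-SCSI-command plumbing with a plain submit-and-wait handshake: mega_internal_command() queues the SCB on the pending list and blocks in wait_for_completion(&adapter->int_waitq), while the completion path stores the firmware status in adapter->int_status and signals the waiter. The sketch below models that handshake with POSIX threads; the completion primitive is re-implemented here purely for illustration and is not the kernel's struct completion.

#include <pthread.h>
#include <stdio.h>

/* Minimal user-space model of a completion object. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static struct completion int_waitq = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};
static int int_status = -1;	/* models adapter->int_status */

static void complete_one(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	c->done = 0;		/* ready for the next internal command */
	pthread_mutex_unlock(&c->lock);
}

/* Stand-in for the interrupt-side path in mega_cmd_done(). */
static void *irq_side(void *arg)
{
	(void)arg;
	int_status = 0;		/* firmware reported success */
	complete_one(&int_waitq);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, irq_side, NULL);	/* "fire" the command */
	wait_for_completion(&int_waitq);		/* block like the driver */
	printf("internal command status: %d\n", int_status);
	pthread_join(t, NULL);
	return 0;
}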
	/*  	 * Print a debug message for all failed commands. Applications can use  	 * this information.  	 */ -	if( scmd->result && trace_level ) { +	if (rval && trace_level) {  		printk("megaraid: cmd [%x, %x, %x] status:[%x]\n", -			mc->cmd, mc->opcode, mc->subopcode, scmd->result); +			mc->cmd, mc->opcode, mc->subopcode, rval);  	}  	mutex_unlock(&adapter->int_mtx); - -	scsi_free_command(GFP_KERNEL, scmd); -  	return rval;  } - -/** - * mega_internal_done() - * @scmd - internal scsi command - * - * Callback routine for internal commands. - */ -static void -mega_internal_done(Scsi_Cmnd *scmd) -{ -	adapter_t	*adapter; - -	adapter = (adapter_t *)scmd->device->host->hostdata; - -	complete(&adapter->int_waitq); - -} - -  static struct scsi_host_template megaraid_template = {  	.module				= THIS_MODULE,  	.name				= "MegaRAID", @@ -4244,6 +4186,7 @@ static struct scsi_host_template megaraid_template = {  	.eh_device_reset_handler	= megaraid_reset,  	.eh_bus_reset_handler		= megaraid_reset,  	.eh_host_reset_handler		= megaraid_reset, +	.no_write_same			= 1,  };  static int diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 4d0ce4e78df..508d65e5a51 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -853,10 +853,10 @@ typedef struct {  	u8	sglen;	/* f/w supported scatter-gather list length */ -	unsigned char int_cdb[MAX_COMMAND_SIZE];  	scb_t			int_scb;  	struct mutex		int_mtx;	/* To synchronize the internal  						commands */ +	int			int_status;	/* status of internal cmd */  	struct completion	int_waitq;	/* wait queue for internal  						 cmds */ @@ -1004,7 +1004,6 @@ static int mega_del_logdrv(adapter_t *, int);  static int mega_do_del_logdrv(adapter_t *, int);  static void mega_get_max_sgl(adapter_t *);  static int mega_internal_command(adapter_t *, megacmd_t *, mega_passthru *); -static void mega_internal_done(Scsi_Cmnd *);  static int mega_support_cluster(adapter_t *);  #endif diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index 515c9629e9f..e2237a97cb9 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -367,6 +367,7 @@ static struct scsi_host_template megaraid_template_g = {  	.eh_host_reset_handler		= megaraid_reset_handler,  	.change_queue_depth		= megaraid_change_queue_depth,  	.use_clustering			= ENABLE_CLUSTERING, +	.no_write_same			= 1,  	.sdev_attrs			= megaraid_sdev_attrs,  	.shost_attrs			= megaraid_shost_attrs,  }; @@ -534,7 +535,6 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	return 0;  out_cmm_unreg: -	pci_set_drvdata(pdev, NULL);  	megaraid_cmm_unregister(adapter);  out_fini_mbox:  	megaraid_fini_mbox(adapter); @@ -594,11 +594,6 @@ megaraid_detach_one(struct pci_dev *pdev)  	// detach from the IO sub-system  	megaraid_io_detach(adapter); -	// reset the device state in the PCI structure. We check this -	// condition when we enter here. 
If the device state is NULL, -	// that would mean the device has already been removed -	pci_set_drvdata(pdev, NULL); -  	// Unregister from common management module  	//  	// FIXME: this must return success or failure for conditions if there diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index dfffd0f3791..a70692779a1 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -486,6 +486,8 @@ mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)  	pthru32->dataxferaddr	= kioc->buf_paddr;  	if (kioc->data_dir & UIOC_WR) { +		if (pthru32->dataxferlen > kioc->xferlen) +			return -EINVAL;  		if (copy_from_user(kioc->buf_vaddr, kioc->user_data,  						pthru32->dataxferlen)) {  			return (-EFAULT); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 0c73ba4bf45..32166c2c785 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@  /*   * MegaRAID SAS Driver meta data   */ -#define MEGASAS_VERSION				"06.700.06.00-rc1" -#define MEGASAS_RELDATE				"Aug. 31, 2013" -#define MEGASAS_EXT_VERSION			"Sat. Aug. 31 17:00:00 PDT 2013" +#define MEGASAS_VERSION				"06.803.01.00-rc1" +#define MEGASAS_RELDATE				"Mar. 10, 2014" +#define MEGASAS_EXT_VERSION			"Mon. Mar. 10 17:00:00 PDT 2014"  /*   * Device IDs @@ -48,6 +48,7 @@  #define	PCI_DEVICE_ID_LSI_SAS0073SKINNY		0x0073  #define	PCI_DEVICE_ID_LSI_SAS0071SKINNY		0x0071  #define	PCI_DEVICE_ID_LSI_FUSION		0x005b +#define PCI_DEVICE_ID_LSI_PLASMA		0x002f  #define PCI_DEVICE_ID_LSI_INVADER		0x005d  #define PCI_DEVICE_ID_LSI_FURY			0x005f @@ -559,7 +560,8 @@ struct megasas_ctrl_info {  		u8 PCIE:1;  		u8 iSCSI:1;  		u8 SAS_3G:1; -		u8 reserved_0:4; +		u8 SRIOV:1; +		u8 reserved_0:3;  		u8 reserved_1[6];  		u8 port_count;  		u64 port_addr[8]; @@ -839,7 +841,12 @@ struct megasas_ctrl_info {  	struct {                                /*7A4h */  #if   defined(__BIG_ENDIAN_BITFIELD) -		u32     reserved:11; +		u32     reserved:5; +		u32	activePassive:2; +		u32	supportConfigAutoBalance:1; +		u32	mpio:1; +		u32	supportDataLDonSSCArray:1; +		u32	supportPointInTimeProgress:1;  		u32     supportUnevenSpans:1;  		u32     dedicatedHotSparesLimited:1;  		u32     headlessMode:1; @@ -886,7 +893,12 @@ struct megasas_ctrl_info {  		u32     supportUnevenSpans:1; -		u32     reserved:11; +		u32	supportPointInTimeProgress:1; +		u32	supportDataLDonSSCArray:1; +		u32	mpio:1; +		u32	supportConfigAutoBalance:1; +		u32	activePassive:2; +		u32     reserved:5;  #endif  	} adapterOperations2; @@ -914,8 +926,14 @@ struct megasas_ctrl_info {  	} cluster;  	char clusterId[16];                     /*7D4h */ +	struct { +		u8  maxVFsSupported;            /*0x7E4*/ +		u8  numVFsEnabled;              /*0x7E5*/ +		u8  requestorId;                /*0x7E6 0:PF, 1:VF1, 2:VF2*/ +		u8  reserved;                   /*0x7E7*/ +	} iov; -	u8          pad[0x800-0x7E4];           /*7E4 */ +	u8          pad[0x800-0x7E8];           /*0x7E8 pad to 2k */  } __packed;  /* @@ -986,7 +1004,9 @@ struct megasas_ctrl_info {  #define MFI_OB_INTR_STATUS_MASK			0x00000002  #define MFI_POLL_TIMEOUT_SECS			60 - +#define MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF	(5 * HZ) +#define MEGASAS_OCR_SETTLE_TIME_VF		(1000 * 30) +#define MEGASAS_ROUTINE_WAIT_TIME_VF		300  #define MFI_REPLY_1078_MESSAGE_INTERRUPT	0x80000000  #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT	0x00000001  #define MFI_GEN2_ENABLE_INTERRUPT_MASK		(0x00000001 | 0x00000004) @@ -1347,9 +1367,15 
@@ struct megasas_cmd;  union megasas_evt_class_locale {  	struct { +#ifndef __BIG_ENDIAN_BITFIELD  		u16 locale;  		u8 reserved;  		s8 class; +#else +		s8 class; +		u8 reserved; +		u16 locale; +#endif  	} __attribute__ ((packed)) members;  	u32 word; @@ -1523,14 +1549,20 @@ struct megasas_instance {  	dma_addr_t producer_h;  	u32 *consumer;  	dma_addr_t consumer_h; +	struct MR_LD_VF_AFFILIATION *vf_affiliation; +	dma_addr_t vf_affiliation_h; +	struct MR_LD_VF_AFFILIATION_111 *vf_affiliation_111; +	dma_addr_t vf_affiliation_111_h; +	struct MR_CTRL_HB_HOST_MEM *hb_host_mem; +	dma_addr_t hb_host_mem_h;  	u32 *reply_queue;  	dma_addr_t reply_queue_h; -	unsigned long base_addr;  	struct megasas_register_set __iomem *reg_set;  	u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];  	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD]; +	struct megasas_pd_list          local_pd_list[MEGASAS_MAX_PD];  	u8     ld_ids[MEGASAS_MAX_LD_IDS];  	s8 init_id; @@ -1598,10 +1630,73 @@ struct megasas_instance {  	unsigned long bar;  	long reset_flags;  	struct mutex reset_mutex; +	struct timer_list sriov_heartbeat_timer; +	char skip_heartbeat_timer_del; +	u8 requestorId; +	u64 initiator_sas_address; +	u64 ld_sas_address[64]; +	char PlasmaFW111; +	char mpio;  	int throttlequeuedepth;  	u8 mask_interrupts;  	u8 is_imr;  }; +struct MR_LD_VF_MAP { +	u32 size; +	union MR_LD_REF ref; +	u8 ldVfCount; +	u8 reserved[6]; +	u8 policy[1]; +}; + +struct MR_LD_VF_AFFILIATION { +	u32 size; +	u8 ldCount; +	u8 vfCount; +	u8 thisVf; +	u8 reserved[9]; +	struct MR_LD_VF_MAP map[1]; +}; + +/* Plasma 1.11 FW backward compatibility structures */ +#define IOV_111_OFFSET 0x7CE +#define MAX_VIRTUAL_FUNCTIONS 8 + +struct IOV_111 { +	u8 maxVFsSupported; +	u8 numVFsEnabled; +	u8 requestorId; +	u8 reserved[5]; +}; + +struct MR_LD_VF_MAP_111 { +	u8 targetId; +	u8 reserved[3]; +	u8 policy[MAX_VIRTUAL_FUNCTIONS]; +}; + +struct MR_LD_VF_AFFILIATION_111 { +	u8 vdCount; +	u8 vfCount; +	u8 thisVf; +	u8 reserved[5]; +	struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES]; +}; + +struct MR_CTRL_HB_HOST_MEM { +	struct { +		u32 fwCounter;	/* Firmware heart beat counter */ +		struct { +			u32 debugmode:1; /* 1=Firmware is in debug mode. +					    Heart beat will not be updated. */ +			u32 reserved:31; +		} debug; +		u32 reserved_fw[6]; +		u32 driverCounter; /* Driver heart beat counter.  
0x20 */ +		u32 reserved_driver[7]; +	} HB; +	u8 pad[0x400-0x40]; +};  enum {  	MEGASAS_HBA_OPERATIONAL			= 0, @@ -1609,6 +1704,7 @@ enum {  	MEGASAS_ADPRESET_SM_FW_RESET_SUCCESS	= 2,  	MEGASAS_ADPRESET_SM_OPERATIONAL		= 3,  	MEGASAS_HW_CRITICAL_ERROR		= 4, +	MEGASAS_ADPRESET_SM_POLLING		= 5,  	MEGASAS_ADPRESET_INPROG_SIGN		= 0xDEADDEAD,  }; @@ -1728,7 +1824,7 @@ MR_BuildRaidContext(struct megasas_instance *instance,  		    struct IO_REQUEST_INFO *io_info,  		    struct RAID_CONTEXT *pRAID_Context,  		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); -u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); +u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);  struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);  u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);  u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 3020921a474..112799b131a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@   *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA   *   *  FILE: megaraid_sas_base.c - *  Version : 06.700.06.00-rc1 + *  Version : 06.803.01.00-rc1   *   *  Authors: LSI Corporation   *           Sreenivas Bagalkote @@ -75,6 +75,10 @@ static unsigned int msix_vectors;  module_param(msix_vectors, int, S_IRUGO);  MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW"); +static int allow_vf_ioctls; +module_param(allow_vf_ioctls, int, S_IRUGO); +MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0"); +  static int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;  module_param(throttlequeuedepth, int, S_IRUGO);  MODULE_PARM_DESC(throttlequeuedepth, @@ -122,6 +126,8 @@ static struct pci_device_id megasas_pci_table[] = {  	/* xscale IOP */  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},  	/* Fusion */ +	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)}, +	/* Plasma */  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},  	/* Invader */  	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)}, @@ -132,7 +138,7 @@ static struct pci_device_id megasas_pci_table[] = {  MODULE_DEVICE_TABLE(pci, megasas_pci_table);  static int megasas_mgmt_majorno; -static struct megasas_mgmt_info megasas_mgmt_info; +struct megasas_mgmt_info megasas_mgmt_info;  static struct fasync_struct *megasas_async_queue;  static DEFINE_MUTEX(megasas_async_queue_mutex); @@ -171,10 +177,15 @@ megasas_get_map_info(struct megasas_instance *instance);  int  megasas_sync_map_info(struct megasas_instance *instance);  int -wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, +	int seconds);  void megasas_reset_reply_desc(struct megasas_instance *instance); -int megasas_reset_fusion(struct Scsi_Host *shost); +int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout);  void megasas_fusion_ocr_wq(struct work_struct *work); +static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, +					 int initial); +int megasas_check_mpio_paths(struct megasas_instance *instance, +			     struct scsi_cmnd *scmd);  void  megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) @@ -224,6 +235,7 @@ megasas_return_cmd(struct megasas_instance *instance, 
struct megasas_cmd *cmd)  	cmd->scmd = NULL;  	cmd->frame_count = 0;  	if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && +	    (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&  	    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&  	    (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&  	    (reset_devices)) @@ -877,6 +889,7 @@ extern struct megasas_instance_template megasas_instance_template_fusion;  int  megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)  { +	int seconds;  	struct megasas_header *frame_hdr = &cmd->frame->hdr; @@ -891,13 +904,18 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)  	/*  	 * Wait for cmd_status to change  	 */ -	return wait_and_poll(instance, cmd); +	if (instance->requestorId) +		seconds = MEGASAS_ROUTINE_WAIT_TIME_VF; +	else +		seconds = MFI_POLL_TIMEOUT_SECS; +	return wait_and_poll(instance, cmd, seconds);  }  /**   * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds   * @instance:			Adapter soft state   * @cmd:			Command to be issued + * @timeout:			Timeout in seconds   *   * This function waits on an event for the command to be returned from ISR.   * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs @@ -905,13 +923,20 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)   */  static int  megasas_issue_blocked_cmd(struct megasas_instance *instance, -			  struct megasas_cmd *cmd) +			  struct megasas_cmd *cmd, int timeout)  { +	int ret = 0;  	cmd->cmd_status = ENODATA;  	instance->instancet->issue_dcmd(instance, cmd); - -	wait_event(instance->int_cmd_wait_q, cmd->cmd_status != ENODATA); +	if (timeout) { +		ret = wait_event_timeout(instance->int_cmd_wait_q, +				cmd->cmd_status != ENODATA, timeout * HZ); +		if (!ret) +			return 1; +	} else +		wait_event(instance->int_cmd_wait_q, +				cmd->cmd_status != ENODATA);  	return 0;  } 
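megasas_issue_blocked_cmd() now has two modes: a nonzero timeout uses wait_event_timeout() and makes the function return 1 on expiry, while a timeout of 0 preserves the old indefinite wait (the SR-IOV paths pass 0 where a VF must wait however long the physical function takes). The user-space sketch below mimics that contract with pthread_cond_timedwait(); only the return convention is taken from the driver, the rest is illustrative.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int cmd_status = -1;	/* -1 models cmd_status == ENODATA */

/* Wait until cmd_status changes or 'timeout' seconds elapse.
 * Returns 0 on completion, 1 on timeout; timeout == 0 waits forever,
 * matching the convention of the new megasas_issue_blocked_cmd(). */
static int blocked_wait(int timeout)
{
	int rc = 0;
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout;

	pthread_mutex_lock(&lock);
	while (cmd_status == -1 && rc != ETIMEDOUT) {
		if (timeout)
			rc = pthread_cond_timedwait(&cond, &lock, &ts);
		else
			pthread_cond_wait(&cond, &lock);
	}
	pthread_mutex_unlock(&lock);
	return rc == ETIMEDOUT ? 1 : 0;
}

int main(void)
{
	/* No completer ever signals here, so a 1-second wait times out;
	 * a real caller would have an interrupt path set cmd_status and
	 * signal the condition variable. */
	printf("wait returned %d (1 == timed out)\n", blocked_wait(1));
	return 0;
}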
@@ -920,18 +945,20 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,   * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd   * @instance:				Adapter soft state   * @cmd_to_abort:			Previously issued cmd to be aborted + * @timeout:				Timeout in seconds   * - * MFI firmware can abort previously issued AEN comamnd (automatic event + * MFI firmware can abort previously issued AEN command (automatic event   * notification). The megasas_issue_blocked_abort_cmd() issues such abort   * cmd and waits for return status.   * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs   */  static int  megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, -				struct megasas_cmd *cmd_to_abort) +				struct megasas_cmd *cmd_to_abort, int timeout)  {  	struct megasas_cmd *cmd;  	struct megasas_abort_frame *abort_fr; +	int ret = 0;  	cmd = megasas_get_cmd(instance); @@ -957,10 +984,18 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,  	instance->instancet->issue_dcmd(instance, cmd); -	/* -	 * Wait for this cmd to complete -	 */ -	wait_event(instance->abort_cmd_wait_q, cmd->cmd_status != 0xFF); +	if (timeout) { +		ret = wait_event_timeout(instance->abort_cmd_wait_q, +				cmd->cmd_status != ENODATA, timeout * HZ); +		if (!ret) { +			dev_err(&instance->pdev->dev, "Command timed out" +				" from %s\n", __func__); +			return 1; +		} +	} else +		wait_event(instance->abort_cmd_wait_q, +				cmd->cmd_status != ENODATA); +  	cmd->sync_cmd = 0;  	megasas_return_cmd(instance, cmd); @@ -1514,9 +1549,23 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd  	spin_lock_irqsave(&instance->hba_lock, flags); +	/* Check for an mpio path and adjust behavior */ +	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) { +		if (megasas_check_mpio_paths(instance, scmd) == +		    (DID_RESET << 16)) { +			spin_unlock_irqrestore(&instance->hba_lock, flags); +			return SCSI_MLQUEUE_HOST_BUSY; +		} else { +			spin_unlock_irqrestore(&instance->hba_lock, flags); +			scmd->result = DID_NO_CONNECT << 16; +			done(scmd); +			return 0; +		} +	} +  	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {  		spin_unlock_irqrestore(&instance->hba_lock, flags); -		scmd->result = DID_ERROR << 16; +		scmd->result = DID_NO_CONNECT << 16;  		done(scmd);  		return 0;  	} @@ -1641,9 +1690,14 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || +	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {  		writel(MFI_STOP_ADP, &instance->reg_set->doorbell); +		/* Flush */ +		readl(&instance->reg_set->doorbell); +		if (instance->mpio && instance->requestorId) +			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);  	} else {  		writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);  	} @@ -1730,6 +1784,25 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)  	megasas_check_and_restore_queue_depth(instance);  } +/** + * megasas_start_timer - Initializes a timer object + * @instance:		Adapter soft state + * @timer:		timer object to be initialized + * @fn:			timer function + * @interval:		time interval between timer function calls + * + */ +void megasas_start_timer(struct megasas_instance *instance, +			struct timer_list *timer, +			void *fn, unsigned long interval) +{ +	init_timer(timer); +	timer->expires = jiffies + interval; +	timer->data = (unsigned long)instance; +	timer->function = fn; +	add_timer(timer); +} +  static void  megasas_internal_reset_defer_cmds(struct megasas_instance *instance); @@ -1752,6 +1825,295 @@ void megasas_do_ocr(struct megasas_instance *instance)  	process_fw_state_change_wq(&instance->work_init);  } +/* This function will get the current SR-IOV LD/VF affiliation */ +static int megasas_get_ld_vf_affiliation(struct 
megasas_instance *instance, +	int initial) +{ +	struct megasas_cmd *cmd; +	struct megasas_dcmd_frame *dcmd; +	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL; +	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; +	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL; +	dma_addr_t new_affiliation_h; +	dma_addr_t new_affiliation_111_h; +	int ld, retval = 0; +	u8 thisVf; + +	cmd = megasas_get_cmd(instance); + +	if (!cmd) { +		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_" +		       "affiliation: Failed to get cmd for scsi%d.\n", +			instance->host->host_no); +		return -ENOMEM; +	} + +	dcmd = &cmd->frame->dcmd; + +	if (!instance->vf_affiliation && !instance->vf_affiliation_111) { +		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF " +		       "affiliation for scsi%d.\n", instance->host->host_no); +		megasas_return_cmd(instance, cmd); +		return -ENOMEM; +	} + +	if (initial) +		if (instance->PlasmaFW111) +			memset(instance->vf_affiliation_111, 0, +			       sizeof(struct MR_LD_VF_AFFILIATION_111)); +		else +			memset(instance->vf_affiliation, 0, +			       (MAX_LOGICAL_DRIVES + 1) * +			       sizeof(struct MR_LD_VF_AFFILIATION)); +	else { +		if (instance->PlasmaFW111) +			new_affiliation_111 = +				pci_alloc_consistent(instance->pdev, +						     sizeof(struct MR_LD_VF_AFFILIATION_111), +						     &new_affiliation_111_h); +		else +			new_affiliation = +				pci_alloc_consistent(instance->pdev, +						     (MAX_LOGICAL_DRIVES + 1) * +						     sizeof(struct MR_LD_VF_AFFILIATION), +						     &new_affiliation_h); +		if (!new_affiliation && !new_affiliation_111) { +			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate " +			       "memory for new affiliation for scsi%d.\n", +				instance->host->host_no); +			megasas_return_cmd(instance, cmd); +			return -ENOMEM; +		} +		if (instance->PlasmaFW111) +			memset(new_affiliation_111, 0, +			       sizeof(struct MR_LD_VF_AFFILIATION_111)); +		else +			memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) * +			       sizeof(struct MR_LD_VF_AFFILIATION)); +	} + +	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + +	dcmd->cmd = MFI_CMD_DCMD; +	dcmd->cmd_status = 0xFF; +	dcmd->sge_count = 1; +	dcmd->flags = MFI_FRAME_DIR_BOTH; +	dcmd->timeout = 0; +	dcmd->pad_0 = 0; +	if (instance->PlasmaFW111) { +		dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111); +		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111; +	} else { +		dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) * +			sizeof(struct MR_LD_VF_AFFILIATION); +		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS; +	} + +	if (initial) { +		if (instance->PlasmaFW111) +			dcmd->sgl.sge32[0].phys_addr = +			  instance->vf_affiliation_111_h; +		else +			dcmd->sgl.sge32[0].phys_addr = +			  instance->vf_affiliation_h; +	} else { +		if (instance->PlasmaFW111) +			dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h; +		else +			dcmd->sgl.sge32[0].phys_addr = new_affiliation_h; +	} +	if (instance->PlasmaFW111) +		dcmd->sgl.sge32[0].length = +		  sizeof(struct MR_LD_VF_AFFILIATION_111); +	else +		dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) * +			sizeof(struct MR_LD_VF_AFFILIATION); + +	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " +	       "scsi%d\n", instance->host->host_no); + +	megasas_issue_blocked_cmd(instance, cmd, 0); + +	if (dcmd->cmd_status) { +		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD" +		       " failed with status 0x%x for scsi%d.\n", +		       dcmd->cmd_status, instance->host->host_no); +		retval = 1; /* Do a scan if we couldn't get 
affiliation */ +		goto out; +	} + +	if (!initial) { +		if (instance->PlasmaFW111) { +			if (!new_affiliation_111->vdCount) { +				printk(KERN_WARNING "megasas: SR-IOV: Got new " +				       "LD/VF affiliation for passive path " +				       "for scsi%d.\n", +					instance->host->host_no); +				retval = 1; +				goto out; +			} +			thisVf = new_affiliation_111->thisVf; +			for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++) +				if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) { +					printk(KERN_WARNING "megasas: SR-IOV: " +					       "Got new LD/VF affiliation " +					       "for scsi%d.\n", +						instance->host->host_no); +					memcpy(instance->vf_affiliation_111, +					       new_affiliation_111, +					       sizeof(struct MR_LD_VF_AFFILIATION_111)); +					retval = 1; +					goto out; +				} +		} else { +			if (!new_affiliation->ldCount) { +				printk(KERN_WARNING "megasas: SR-IOV: Got new " +				       "LD/VF affiliation for passive " +				       "path for scsi%d.\n", +				       instance->host->host_no); +				retval = 1; +				goto out; +			} +			newmap = new_affiliation->map; +			savedmap = instance->vf_affiliation->map; +			thisVf = new_affiliation->thisVf; +			for (ld = 0 ; ld < new_affiliation->ldCount; ld++) { +				if (savedmap->policy[thisVf] != +				    newmap->policy[thisVf]) { +					printk(KERN_WARNING "megasas: SR-IOV: " +					       "Got new LD/VF affiliation " +					       "for scsi%d.\n", +						instance->host->host_no); +					memcpy(instance->vf_affiliation, +					       new_affiliation, +					       new_affiliation->size); +					retval = 1; +					goto out; +				} +				savedmap = (struct MR_LD_VF_MAP *) +					((unsigned char *)savedmap + +					 savedmap->size); +				newmap = (struct MR_LD_VF_MAP *) +					((unsigned char *)newmap + +					 newmap->size); +			} +		} +	} +out: +	if (new_affiliation || new_affiliation_111) { +		if (instance->PlasmaFW111) +			pci_free_consistent(instance->pdev, +					    sizeof(struct MR_LD_VF_AFFILIATION_111), +					    new_affiliation_111, +					    new_affiliation_111_h); +		else +			pci_free_consistent(instance->pdev, +					    (MAX_LOGICAL_DRIVES + 1) * +					    sizeof(struct MR_LD_VF_AFFILIATION), +					    new_affiliation, new_affiliation_h); +	} +	megasas_return_cmd(instance, cmd); + +	return retval; +} + +/* This function will tell FW to start the SR-IOV heartbeat */ +int megasas_sriov_start_heartbeat(struct megasas_instance *instance, +					 int initial) +{ +	struct megasas_cmd *cmd; +	struct megasas_dcmd_frame *dcmd; +	int retval = 0; + +	cmd = megasas_get_cmd(instance); + +	if (!cmd) { +		printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: " +		       "Failed to get cmd for scsi%d.\n", +		       instance->host->host_no); +		return -ENOMEM; +	} + +	dcmd = &cmd->frame->dcmd; + +	if (initial) { +		instance->hb_host_mem = +			pci_alloc_consistent(instance->pdev, +					     sizeof(struct MR_CTRL_HB_HOST_MEM), +					     &instance->hb_host_mem_h); +		if (!instance->hb_host_mem) { +			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate" +			       " memory for heartbeat host memory for " +			       "scsi%d.\n", instance->host->host_no); +			retval = -ENOMEM; +			goto out; +		} +		memset(instance->hb_host_mem, 0, +		       sizeof(struct MR_CTRL_HB_HOST_MEM)); +	} + +	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); + +	dcmd->mbox.s[0] = sizeof(struct MR_CTRL_HB_HOST_MEM); +	dcmd->cmd = MFI_CMD_DCMD; +	dcmd->cmd_status = 0xFF; +	dcmd->sge_count = 1; +	dcmd->flags = MFI_FRAME_DIR_BOTH; +	dcmd->timeout = 0; 
+	dcmd->pad_0 = 0; +	dcmd->data_xfer_len = sizeof(struct MR_CTRL_HB_HOST_MEM); +	dcmd->opcode = MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC; +	dcmd->sgl.sge32[0].phys_addr = instance->hb_host_mem_h; +	dcmd->sgl.sge32[0].length = sizeof(struct MR_CTRL_HB_HOST_MEM); + +	printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n", +	       instance->host->host_no); + +	if (!megasas_issue_polled(instance, cmd)) { +		retval = 0; +	} else { +		printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" +		       "_MEM_ALLOC DCMD timed out for scsi%d\n", +		       instance->host->host_no); +		retval = 1; +		goto out; +	} + + +	if (dcmd->cmd_status) { +		printk(KERN_WARNING "megasas: SR-IOV: MR_DCMD_CTRL_SHARED_HOST" +		       "_MEM_ALLOC DCMD failed with status 0x%x for scsi%d\n", +		       dcmd->cmd_status, +		       instance->host->host_no); +		retval = 1; +		goto out; +	} + +out: +	megasas_return_cmd(instance, cmd); + +	return retval; +} + +/* Handler for SR-IOV heartbeat */ +void megasas_sriov_heartbeat_handler(unsigned long instance_addr) +{ +	struct megasas_instance *instance = +		(struct megasas_instance *)instance_addr; + +	if (instance->hb_host_mem->HB.fwCounter != +	    instance->hb_host_mem->HB.driverCounter) { +		instance->hb_host_mem->HB.driverCounter = +			instance->hb_host_mem->HB.fwCounter; +		mod_timer(&instance->sriov_heartbeat_timer, +			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); +	} else { +		printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never " +		       "completed for scsi%d\n", instance->host->host_no); +		schedule_work(&instance->work_init); +	} +} +  /**   * megasas_wait_for_outstanding -	Wait for all outstanding cmds   * @instance:				Adapter soft state @@ -2014,9 +2376,10 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)  	 * First wait for all commands to complete  	 */  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || +	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) -		ret = megasas_reset_fusion(scmd->device->host); +		ret = megasas_reset_fusion(scmd->device->host, 1);  	else  		ret = megasas_generic_reset(scmd); @@ -2148,6 +2511,7 @@ static struct scsi_host_template megasas_template = {  	.bios_param = megasas_bios_param,  	.use_clustering = ENABLE_CLUSTERING,  	.change_queue_depth = megasas_change_queue_depth, +	.no_write_same = 1,  };  /** @@ -2697,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  	u32 cur_state;  	u32 abs_state, curr_abs_state; -	fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK; +	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set); +	fw_state = abs_state & MFI_STATE_MASK;  	if (fw_state != MFI_STATE_READY)  		printk(KERN_INFO "megasas: Waiting for FW to come to ready" @@ -2705,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  	while (fw_state != MFI_STATE_READY) { -		abs_state = -		instance->instancet->read_fw_status_reg(instance->reg_set); -  		switch (fw_state) {  		case MFI_STATE_FAULT: @@ -2730,6 +3092,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  				(instance->pdev->device ==  				PCI_DEVICE_ID_LSI_FUSION) ||  				(instance->pdev->device == +				PCI_DEVICE_ID_LSI_PLASMA) || +				(instance->pdev->device ==  				PCI_DEVICE_ID_LSI_INVADER) ||  				(instance->pdev->device ==  				PCI_DEVICE_ID_LSI_FURY)) { @@ -2754,6 
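
The heartbeat handler above is a classic producer/consumer watchdog: firmware bumps fwCounter, the driver samples it on a timer, and a stalled counter triggers OCR work. A stripped-down sketch of the same shape, with hypothetical names and the pre-4.15 timer callback signature this driver uses (the 2 * HZ interval is illustrative; the driver uses MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF):

#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct hb_ctx {
	u32 fw_counter;			/* advanced by the firmware */
	u32 drv_counter;		/* last value the driver consumed */
	struct timer_list timer;
	struct work_struct recovery;
};

static void hb_timer_fn(unsigned long data)
{
	struct hb_ctx *hb = (struct hb_ctx *)data;

	if (hb->fw_counter != hb->drv_counter) {
		hb->drv_counter = hb->fw_counter;	/* beat seen, re-arm */
		mod_timer(&hb->timer, jiffies + 2 * HZ);
	} else {
		schedule_work(&hb->recovery);		/* beat missed */
	}
}
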
+3118,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  			    (instance->pdev->device ==  			     PCI_DEVICE_ID_LSI_FUSION) ||  			    (instance->pdev->device == +			     PCI_DEVICE_ID_LSI_PLASMA) || +			    (instance->pdev->device ==  			     PCI_DEVICE_ID_LSI_INVADER) ||  			    (instance->pdev->device ==  			     PCI_DEVICE_ID_LSI_FURY)) { @@ -2779,6 +3145,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  				(instance->pdev->device  					== PCI_DEVICE_ID_LSI_FUSION) ||  				(instance->pdev->device +					== PCI_DEVICE_ID_LSI_PLASMA) || +				(instance->pdev->device  					== PCI_DEVICE_ID_LSI_INVADER) ||  				(instance->pdev->device  					== PCI_DEVICE_ID_LSI_FURY)) { @@ -2787,6 +3155,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  				if ((instance->pdev->device ==  					PCI_DEVICE_ID_LSI_FUSION) ||  					(instance->pdev->device == +					PCI_DEVICE_ID_LSI_PLASMA) || +					(instance->pdev->device ==  					PCI_DEVICE_ID_LSI_INVADER) ||  					(instance->pdev->device ==  					PCI_DEVICE_ID_LSI_FURY)) { @@ -2851,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  		 * The cur_state should not last for more than max_wait secs  		 */  		for (i = 0; i < (max_wait * 1000); i++) { -			fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & -					MFI_STATE_MASK ; -		curr_abs_state = -		instance->instancet->read_fw_status_reg(instance->reg_set); +			curr_abs_state = instance->instancet-> +				read_fw_status_reg(instance->reg_set);  			if (abs_state == curr_abs_state) {  				msleep(1); @@ -2870,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)  			       "in %d secs\n", fw_state, max_wait);  			return -ENODEV;  		} + +		abs_state = curr_abs_state; +		fw_state = curr_abs_state & MFI_STATE_MASK;  	}  	printk(KERN_INFO "megasas: FW now in Ready state\n"); @@ -3013,6 +3384,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)  		cmd->frame->io.context = cpu_to_le32(cmd->index);  		cmd->frame->io.pad_0 = 0;  		if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && +		    (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&  		    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&  			(instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&  		    (reset_devices)) @@ -3194,19 +3566,21 @@ megasas_get_pd_list(struct megasas_instance *instance)  	     (le32_to_cpu(ci->count) <  		  (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { -		memset(instance->pd_list, 0, +		memset(instance->local_pd_list, 0,  			MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));  		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { -			instance->pd_list[pd_addr->deviceId].tid	= +			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=  				le16_to_cpu(pd_addr->deviceId); -			instance->pd_list[pd_addr->deviceId].driveType	= +			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=  							pd_addr->scsiDevType; -			instance->pd_list[pd_addr->deviceId].driveState	= +			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=  							MR_PD_STATE_SYSTEM;  			pd_addr++;  		} +		memcpy(instance->pd_list, instance->local_pd_list, +			sizeof(instance->pd_list));  	}  	pci_free_consistent(instance->pdev, @@ -3612,22 +3986,24 @@ static int megasas_init_fw(struct megasas_instance *instance)  	u32 max_sectors_1;  	u32 max_sectors_2;  	u32 tmp_sectors, msix_enable, scratch_pad_2; +	
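
The transition_to_ready rework above samples the status register once per pass, derives fw_state from that same sample, and re-reads both only after the raw value changes. A sketch of that bounded-poll structure, with illustrative mask and state values rather than the real MFI definitions:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define EX_STATE_MASK	0xF0000000	/* illustrative, not the MFI mask */
#define EX_STATE_READY	0xB0000000

static int wait_for_ready(void __iomem *status_reg, int max_wait_secs)
{
	u32 abs_state = readl(status_reg);
	int i;

	while ((abs_state & EX_STATE_MASK) != EX_STATE_READY) {
		u32 curr = abs_state;

		/* the current state may not persist beyond max_wait secs */
		for (i = 0; i < max_wait_secs * 1000; i++) {
			curr = readl(status_reg);
			if (curr != abs_state)
				break;
			msleep(1);
		}
		if (curr == abs_state)
			return -ENODEV;		/* stuck in one state */
		abs_state = curr;		/* re-derive on the next pass */
	}
	return 0;
}
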
resource_size_t base_addr;  	struct megasas_register_set __iomem *reg_set;  	struct megasas_ctrl_info *ctrl_info;  	unsigned long bar_list;  	int i, loop, fw_msix_count = 0; +	struct IOV_111 *iovPtr;  	/* Find first memory bar */  	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);  	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); -	instance->base_addr = pci_resource_start(instance->pdev, instance->bar);  	if (pci_request_selected_regions(instance->pdev, instance->bar,  					 "megasas: LSI")) {  		printk(KERN_DEBUG "megasas: IO memory region busy!\n");  		return -EBUSY;  	} -	instance->reg_set = ioremap_nocache(instance->base_addr, 8192); +	base_addr = pci_resource_start(instance->pdev, instance->bar); +	instance->reg_set = ioremap_nocache(base_addr, 8192);  	if (!instance->reg_set) {  		printk(KERN_DEBUG "megasas: Failed to map IO mem\n"); @@ -3638,6 +4014,7 @@ static int megasas_init_fw(struct megasas_instance *instance)  	switch (instance->pdev->device) {  	case PCI_DEVICE_ID_LSI_FUSION: +	case PCI_DEVICE_ID_LSI_PLASMA:  	case PCI_DEVICE_ID_LSI_INVADER:  	case PCI_DEVICE_ID_LSI_FURY:  		instance->instancet = &megasas_instance_template_fusion; @@ -3692,7 +4069,8 @@ static int megasas_init_fw(struct megasas_instance *instance)  		scratch_pad_2 = readl  			(&instance->reg_set->outbound_scratch_pad_2);  		/* Check max MSI-X vectors */ -		if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) { +		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || +		    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA)) {  			instance->msix_vectors = (scratch_pad_2  				& MR_MAX_REPLY_QUEUES_OFFSET) + 1;  			fw_msix_count = instance->msix_vectors; @@ -3759,7 +4137,10 @@ static int megasas_init_fw(struct megasas_instance *instance)  	memset(instance->pd_list, 0 ,  		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); -	megasas_get_pd_list(instance); +	if (megasas_get_pd_list(instance) < 0) { +		printk(KERN_ERR "megasas: failed to get PD list\n"); +		goto fail_init_adapter; +	}  	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);  	if (megasas_ld_list_query(instance, @@ -3803,6 +4184,7 @@ static int megasas_init_fw(struct megasas_instance *instance)  		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;  		/* adapterOperations2 are converted into CPU arch*/  		le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); +		instance->mpio = ctrl_info->adapterOperations2.mpio;  		instance->UnevenSpanSupport =  			ctrl_info->adapterOperations2.supportUnevenSpans;  		if (instance->UnevenSpanSupport) { @@ -3815,6 +4197,20 @@ static int megasas_init_fw(struct megasas_instance *instance)  				fusion->fast_path_io = 0;  		} +		if (ctrl_info->host_interface.SRIOV) { +			if (!ctrl_info->adapterOperations2.activePassive) +				instance->PlasmaFW111 = 1; + +			if (!instance->PlasmaFW111) +				instance->requestorId = +					ctrl_info->iov.requestorId; +			else { +				iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET); +				instance->requestorId = iovPtr->requestorId; +			} +			printk(KERN_WARNING "megaraid_sas: I am VF " +			       "requestorId %d\n", instance->requestorId); +		}  	}  	instance->max_sectors_per_req = instance->max_num_sge *  						PAGE_SIZE / 512; @@ -3847,6 +4243,17 @@ static int megasas_init_fw(struct megasas_instance *instance)  	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,  		(unsigned long)instance); +	/* Launch SR-IOV heartbeat timer */ +	if (instance->requestorId) { +		if (!megasas_sriov_start_heartbeat(instance, 
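
The probe path above now keeps the BAR base address local: select the first memory BAR, reserve it, ioremap a small window, and never store the physical address. A sketch under those assumptions (the region name and length are placeholders; note the sketch passes a bit mask, 1 << bar, to pci_request_selected_regions()):

#include <linux/pci.h>
#include <linux/io.h>
#include <linux/bitops.h>

static void __iomem *map_first_mem_bar(struct pci_dev *pdev, size_t len)
{
	unsigned long bars = pci_select_bars(pdev, IORESOURCE_MEM);
	int bar = find_first_bit(&bars, BITS_PER_LONG);
	resource_size_t base;

	if (pci_request_selected_regions(pdev, 1 << bar, "example-hba"))
		return NULL;

	base = pci_resource_start(pdev, bar);	/* kept local, as above */
	return ioremap_nocache(base, len);	/* caller unmaps + releases */
}
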
1)) +			megasas_start_timer(instance, +					    &instance->sriov_heartbeat_timer, +					    megasas_sriov_heartbeat_handler, +					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); +		else +			instance->skip_heartbeat_timer_del = 1; +	} +  	return 0;  fail_init_adapter: @@ -3929,16 +4336,19 @@ megasas_get_seq_num(struct megasas_instance *instance,  	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h);  	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); -	megasas_issue_blocked_cmd(instance, cmd); - -	/* -	 * Copy the data back into callers buffer -	 */ -	eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); -	eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); -	eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); -	eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); -	eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); +	if (megasas_issue_blocked_cmd(instance, cmd, 30)) +		dev_err(&instance->pdev->dev, "Command timedout" +			"from %s\n", __func__); +	else { +		/* +		 * Copy the data back into callers buffer +		 */ +		eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); +		eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); +		eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); +		eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); +		eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); +	}  	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),  			    el_info, el_info_h); @@ -3998,7 +4408,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,  		 * values  		 */  		if ((prev_aen.members.class <= curr_aen.members.class) && -		    !((le16_to_cpu(prev_aen.members.locale) & curr_aen.members.locale) ^ +		    !((prev_aen.members.locale & curr_aen.members.locale) ^  		      curr_aen.members.locale)) {  			/*  			 * Previously issued event registration includes @@ -4006,7 +4416,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,  			 */  			return 0;  		} else { -			curr_aen.members.locale |= le16_to_cpu(prev_aen.members.locale); +			curr_aen.members.locale |= prev_aen.members.locale;  			if (prev_aen.members.class < curr_aen.members.class)  				curr_aen.members.class = prev_aen.members.class; @@ -4014,7 +4424,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,  			instance->aen_cmd->abort_aen = 1;  			ret_val = megasas_issue_blocked_abort_cmd(instance,  								  instance-> -								  aen_cmd); +								  aen_cmd, 30);  			if (ret_val) {  				printk(KERN_DEBUG "megasas: Failed to abort " @@ -4097,7 +4507,7 @@ static int megasas_start_aen(struct megasas_instance *instance)  	class_locale.members.class = MR_EVT_CLASS_DEBUG;  	return megasas_register_aen(instance, -			le32_to_cpu(eli.newest_seq_num) + 1, +			eli.newest_seq_num + 1,  			class_locale.word);  } @@ -4156,6 +4566,7 @@ static int megasas_io_attach(struct megasas_instance *instance)  	/* Fusion only supports host reset */  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || +	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {  		host->hostt->eh_device_reset_handler = NULL; @@ -4193,6 +4604,19 @@ megasas_set_dma_mask(struct pci_dev *pdev)  		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)  			goto fail_set_dma_mask;  	} +	/* +	 * Ensure that all data structures are allocated in 32-bit +	 * memory. 
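
The DMA-mask fallback added above keeps coherent (firmware-shared) allocations below 4GB even when streaming DMA is 64-bit, dropping everything to 32-bit only if that fails. A compact sketch of the same ladder:

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int set_dma_masks(struct pci_dev *pdev)
{
	/* Keep coherent (control) memory below 4GB. */
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;

	/* Fall back to fully 32-bit DMA if the coherent mask failed. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0)
		return 0;

	return -EIO;
}
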
+	 */ +	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { +		/* Try 32bit DMA mask and 32 bit Consistent dma mask */ +		if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) +			&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) +			dev_info(&pdev->dev, "set 32bit DMA mask" +				"and 32 bit consistent mask\n"); +		else +			goto fail_set_dma_mask; +	}  	return 0; @@ -4208,7 +4632,7 @@ fail_set_dma_mask:  static int megasas_probe_one(struct pci_dev *pdev,  			     const struct pci_device_id *id)  { -	int rval, pos, i, j; +	int rval, pos, i, j, cpu;  	struct Scsi_Host *host;  	struct megasas_instance *instance;  	u16 control = 0; @@ -4268,6 +4692,7 @@ static int megasas_probe_one(struct pci_dev *pdev,  	switch (instance->pdev->device) {  	case PCI_DEVICE_ID_LSI_FUSION: +	case PCI_DEVICE_ID_LSI_PLASMA:  	case PCI_DEVICE_ID_LSI_INVADER:  	case PCI_DEVICE_ID_LSI_FURY:  	{ @@ -4364,6 +4789,7 @@ static int megasas_probe_one(struct pci_dev *pdev,  	instance->UnevenSpanSupport = 0;  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || +	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))  		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); @@ -4376,12 +4802,33 @@ static int megasas_probe_one(struct pci_dev *pdev,  	if (megasas_init_fw(instance))  		goto fail_init_mfi; +	if (instance->requestorId) { +		if (instance->PlasmaFW111) { +			instance->vf_affiliation_111 = +				pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), +						     &instance->vf_affiliation_111_h); +			if (!instance->vf_affiliation_111) +				printk(KERN_WARNING "megasas: Can't allocate " +				       "memory for VF affiliation buffer\n"); +		} else { +			instance->vf_affiliation = +				pci_alloc_consistent(pdev, +						     (MAX_LOGICAL_DRIVES + 1) * +						     sizeof(struct MR_LD_VF_AFFILIATION), +						     &instance->vf_affiliation_h); +			if (!instance->vf_affiliation) +				printk(KERN_WARNING "megasas: Can't allocate " +				       "memory for VF affiliation buffer\n"); +		} +	} +  retry_irq_register:  	/*  	 * Register IRQ  	 */  	if (instance->msix_vectors) { -		for (i = 0 ; i < instance->msix_vectors; i++) { +		cpu = cpumask_first(cpu_online_mask); +		for (i = 0; i < instance->msix_vectors; i++) {  			instance->irq_context[i].instance = instance;  			instance->irq_context[i].MSIxIndex = i;  			if (request_irq(instance->msixentry[i].vector, @@ -4390,14 +4837,22 @@ retry_irq_register:  					&instance->irq_context[i])) {  				printk(KERN_DEBUG "megasas: Failed to "  				       "register IRQ for vector %d.\n", i); -				for (j = 0 ; j < i ; j++) +				for (j = 0; j < i; j++) { +					irq_set_affinity_hint( +						instance->msixentry[j].vector, NULL);  					free_irq(  						instance->msixentry[j].vector,  						&instance->irq_context[j]); +				}  				/* Retry irq register for IO_APIC */  				instance->msix_vectors = 0;  				goto retry_irq_register;  			} +			if (irq_set_affinity_hint(instance->msixentry[i].vector, +				get_cpu_mask(cpu))) +				dev_err(&instance->pdev->dev, "Error setting" +					"affinity hint for cpu %d\n", cpu); +			cpu = cpumask_next(cpu, cpu_online_mask);  		}  	} else {  		instance->irq_context[0].instance = instance; @@ -4449,16 +4904,19 @@ retry_irq_register:  	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;  	megasas_mgmt_info.max_index--; -	pci_set_drvdata(pdev, NULL);  	instance->instancet->disable_intr(instance);  	if 
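
Both IRQ paths above now pair request_irq() with irq_set_affinity_hint(), walking cpu_online_mask so each MSI-X vector lands on its own CPU, and clear the hints before free_irq() on unwind. A self-contained sketch (driver name hypothetical; the wrap-around guard is added here for safety, since cpumask_next() runs past the last CPU):

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/pci.h>

static int spread_msix(struct msix_entry *entries, int nvec,
		       irq_handler_t handler, void *dev)
{
	int i, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < nvec; i++) {
		int err = request_irq(entries[i].vector, handler, 0,
				      "example-hba", dev);
		if (err) {
			/* Unwind: drop hints before freeing, as above. */
			while (--i >= 0) {
				irq_set_affinity_hint(entries[i].vector, NULL);
				free_irq(entries[i].vector, dev);
			}
			return err;
		}
		if (irq_set_affinity_hint(entries[i].vector,
					  get_cpu_mask(cpu)))
			pr_warn("example-hba: no affinity hint, cpu %d\n",
				cpu);
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)		/* wrap past the last CPU */
			cpu = cpumask_first(cpu_online_mask);
	}
	return 0;
}
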
(instance->msix_vectors) -		for (i = 0 ; i < instance->msix_vectors; i++) +		for (i = 0; i < instance->msix_vectors; i++) { +			irq_set_affinity_hint( +				instance->msixentry[i].vector, NULL);  			free_irq(instance->msixentry[i].vector,  				 &instance->irq_context[i]); +		}  	else  		free_irq(instance->pdev->irq, &instance->irq_context[0]);  fail_irq:  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || +	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||  	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))  		megasas_release_fusion(instance); @@ -4519,7 +4977,9 @@ static void megasas_flush_cache(struct megasas_instance *instance)  	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);  	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; -	megasas_issue_blocked_cmd(instance, cmd); +	if (megasas_issue_blocked_cmd(instance, cmd, 30)) +		dev_err(&instance->pdev->dev, "Command timedout" +			" from %s\n", __func__);  	megasas_return_cmd(instance, cmd); @@ -4546,10 +5006,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,  		return;  	if (instance->aen_cmd) -		megasas_issue_blocked_abort_cmd(instance, instance->aen_cmd); +		megasas_issue_blocked_abort_cmd(instance, +			instance->aen_cmd, 30);  	if (instance->map_update_cmd)  		megasas_issue_blocked_abort_cmd(instance, -						instance->map_update_cmd); +			instance->map_update_cmd, 30);  	dcmd = &cmd->frame->dcmd;  	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); @@ -4563,7 +5024,9 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,  	dcmd->data_xfer_len = 0;  	dcmd->opcode = cpu_to_le32(opcode); -	megasas_issue_blocked_cmd(instance, cmd); +	if (megasas_issue_blocked_cmd(instance, cmd, 30)) +		dev_err(&instance->pdev->dev, "Command timedout" +			"from %s\n", __func__);  	megasas_return_cmd(instance, cmd); @@ -4587,6 +5050,10 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)  	host = instance->host;  	instance->unload = 1; +	/* Shutdown SR-IOV heartbeat timer */ +	if (instance->requestorId && !instance->skip_heartbeat_timer_del) +		del_timer_sync(&instance->sriov_heartbeat_timer); +  	megasas_flush_cache(instance);  	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN); @@ -4603,9 +5070,12 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)  	instance->instancet->disable_intr(instance);  	if (instance->msix_vectors) -		for (i = 0 ; i < instance->msix_vectors; i++) +		for (i = 0; i < instance->msix_vectors; i++) { +			irq_set_affinity_hint( +				instance->msixentry[i].vector, NULL);  			free_irq(instance->msixentry[i].vector,  				 &instance->irq_context[i]); +		}  	else  		free_irq(instance->pdev->irq, &instance->irq_context[0]);  	if (instance->msix_vectors) @@ -4626,7 +5096,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)  static int  megasas_resume(struct pci_dev *pdev)  { -	int rval, i, j; +	int rval, i, j, cpu;  	struct Scsi_Host *host;  	struct megasas_instance *instance; @@ -4670,6 +5140,7 @@ megasas_resume(struct pci_dev *pdev)  	switch (instance->pdev->device) {  	case PCI_DEVICE_ID_LSI_FUSION: +	case PCI_DEVICE_ID_LSI_PLASMA:  	case PCI_DEVICE_ID_LSI_INVADER:  	case PCI_DEVICE_ID_LSI_FURY:  	{ @@ -4698,6 +5169,7 @@ megasas_resume(struct pci_dev *pdev)  	 * Register IRQ  	 */  	if (instance->msix_vectors) { +		cpu = cpumask_first(cpu_online_mask);  		for (i = 0 ; i < instance->msix_vectors; i++) {  			instance->irq_context[i].instance = instance;  			
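
The flush, shutdown, and abort calls above all gained a seconds argument, turning the old unbounded wait into wait_event_timeout(), with 0 preserving the legacy behavior. A sketch of that contract, assuming a simple completion flag:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int wait_cmd_done(wait_queue_head_t *wq, int *done, int seconds)
{
	if (!seconds) {
		wait_event(*wq, *done);	/* 0 keeps the old unbounded wait */
		return 0;
	}
	if (!wait_event_timeout(*wq, *done, seconds * HZ))
		return -ETIMEDOUT;	/* caller logs "timed out from %s" */
	return 0;
}
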
instance->irq_context[i].MSIxIndex = i; @@ -4707,12 +5179,21 @@ megasas_resume(struct pci_dev *pdev)  					&instance->irq_context[i])) {  				printk(KERN_DEBUG "megasas: Failed to "  				       "register IRQ for vector %d.\n", i); -				for (j = 0 ; j < i ; j++) +				for (j = 0; j < i; j++) { +					irq_set_affinity_hint( +						instance->msixentry[j].vector, NULL);  					free_irq(  						instance->msixentry[j].vector,  						&instance->irq_context[j]); +				}  				goto fail_irq;  			} + +			if (irq_set_affinity_hint(instance->msixentry[i].vector, +				get_cpu_mask(cpu))) +				dev_err(&instance->pdev->dev, "Error setting" +					"affinity hint for cpu %d\n", cpu); +			cpu = cpumask_next(cpu, cpu_online_mask);  		}  	} else {  		instance->irq_context[0].instance = instance; @@ -4725,6 +5206,17 @@ megasas_resume(struct pci_dev *pdev)  		}  	} +	/* Re-launch SR-IOV heartbeat timer */ +	if (instance->requestorId) { +		if (!megasas_sriov_start_heartbeat(instance, 0)) +			megasas_start_timer(instance, +					    &instance->sriov_heartbeat_timer, +					    megasas_sriov_heartbeat_handler, +					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); +		else +			instance->skip_heartbeat_timer_del = 1; +	} +  	instance->instancet->enable_intr(instance);  	instance->unload = 0; @@ -4779,6 +5271,10 @@ static void megasas_detach_one(struct pci_dev *pdev)  	host = instance->host;  	fusion = instance->ctrl_context; +	/* Shutdown SR-IOV heartbeat timer */ +	if (instance->requestorId && !instance->skip_heartbeat_timer_del) +		del_timer_sync(&instance->sriov_heartbeat_timer); +  	scsi_remove_host(instance->host);  	megasas_flush_cache(instance);  	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); @@ -4790,6 +5286,9 @@ static void megasas_detach_one(struct pci_dev *pdev)  		instance->ev = NULL;  	} +	/* cancel all wait events */ +	wake_up_all(&instance->int_cmd_wait_q); +  	tasklet_kill(&instance->isr_tasklet);  	/* @@ -4805,14 +5304,15 @@ static void megasas_detach_one(struct pci_dev *pdev)  		}  	} -	pci_set_drvdata(instance->pdev, NULL); -  	instance->instancet->disable_intr(instance);  	if (instance->msix_vectors) -		for (i = 0 ; i < instance->msix_vectors; i++) +		for (i = 0; i < instance->msix_vectors; i++) { +			irq_set_affinity_hint( +				instance->msixentry[i].vector, NULL);  			free_irq(instance->msixentry[i].vector,  				 &instance->irq_context[i]); +		}  	else  		free_irq(instance->pdev->irq, &instance->irq_context[0]);  	if (instance->msix_vectors) @@ -4820,6 +5320,7 @@ static void megasas_detach_one(struct pci_dev *pdev)  	switch (instance->pdev->device) {  	case PCI_DEVICE_ID_LSI_FUSION: +	case PCI_DEVICE_ID_LSI_PLASMA:  	case PCI_DEVICE_ID_LSI_INVADER:  	case PCI_DEVICE_ID_LSI_FURY:  		megasas_release_fusion(instance); @@ -4846,9 +5347,25 @@ static void megasas_detach_one(struct pci_dev *pdev)  	if (instance->evt_detail)  		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),  				instance->evt_detail, instance->evt_detail_h); -	scsi_host_put(host); -	pci_set_drvdata(pdev, NULL); +	if (instance->vf_affiliation) +		pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) * +				    sizeof(struct MR_LD_VF_AFFILIATION), +				    instance->vf_affiliation, +				    instance->vf_affiliation_h); + +	if (instance->vf_affiliation_111) +		pci_free_consistent(pdev, +				    sizeof(struct MR_LD_VF_AFFILIATION_111), +				    instance->vf_affiliation_111, +				    instance->vf_affiliation_111_h); + +	if (instance->hb_host_mem) +		pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM), +				    
instance->hb_host_mem, +				    instance->hb_host_mem_h); + +	scsi_host_put(host);  	pci_disable_device(pdev); @@ -4869,9 +5386,12 @@ static void megasas_shutdown(struct pci_dev *pdev)  	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);  	instance->instancet->disable_intr(instance);  	if (instance->msix_vectors) -		for (i = 0 ; i < instance->msix_vectors; i++) +		for (i = 0; i < instance->msix_vectors; i++) { +			irq_set_affinity_hint( +				instance->msixentry[i].vector, NULL);  			free_irq(instance->msixentry[i].vector,  				 &instance->irq_context[i]); +		}  	else  		free_irq(instance->pdev->irq, &instance->irq_context[0]);  	if (instance->msix_vectors) @@ -5046,7 +5566,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,  	 * cmd to the SCSI mid-layer  	 */  	cmd->sync_cmd = 1; -	megasas_issue_blocked_cmd(instance, cmd); +	megasas_issue_blocked_cmd(instance, cmd, 0);  	cmd->sync_cmd = 0;  	/* @@ -5133,6 +5653,16 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)  		goto out_kfree_ioc;  	} +	/* Adjust ioctl wait time for VF mode */ +	if (instance->requestorId) +		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; + +	/* Block ioctls in VF mode */ +	if (instance->requestorId && !allow_vf_ioctls) { +		error = -ENODEV; +		goto out_kfree_ioc; +	} +  	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {  		printk(KERN_ERR "Controller in crit error\n");  		error = -ENODEV; @@ -5442,7 +5972,7 @@ megasas_aen_polling(struct work_struct *work)  	u16     pd_index = 0;  	u16	ld_index = 0;  	int     i, j, doscan = 0; -	u32 seq_num; +	u32 seq_num, wait_time = MEGASAS_RESET_WAIT_TIME;  	int error;  	if (!instance) { @@ -5450,6 +5980,23 @@ megasas_aen_polling(struct work_struct *work)  		kfree(ev);  		return;  	} + +	/* Adjust event workqueue thread wait time for VF mode */ +	if (instance->requestorId) +		wait_time = MEGASAS_ROUTINE_WAIT_TIME_VF; + +	/* Don't run the event workqueue thread if OCR is running */ +	for (i = 0; i < wait_time; i++) { +		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) +			break; +		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { +			printk(KERN_NOTICE "megasas: %s waiting for " +			       "controller reset to finish for scsi%d\n", +			       __func__, instance->host->host_no); +		} +		msleep(1000); +	} +  	instance->ev = NULL;  	host = instance->host;  	if (instance->evt_detail) { @@ -5516,65 +6063,64 @@ megasas_aen_polling(struct work_struct *work)  		case MR_EVT_LD_OFFLINE:  		case MR_EVT_CFG_CLEARED:  		case MR_EVT_LD_DELETED: -			if (megasas_ld_list_query(instance, -					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) -				megasas_get_ld_list(instance); -			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { -				for (j = 0; -				j < MEGASAS_MAX_DEV_PER_CHANNEL; -				j++) { - -				ld_index = -				(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - -				sdev1 = scsi_device_lookup(host, -					MEGASAS_MAX_PD_CHANNELS + i, -					j, -					0); - -				if (instance->ld_ids[ld_index] != 0xff) { -					if (sdev1) { -						scsi_device_put(sdev1); -					} -				} else { -					if (sdev1) { -						scsi_remove_device(sdev1); -						scsi_device_put(sdev1); +			if (!instance->requestorId || +			    (instance->requestorId && +			     megasas_get_ld_vf_affiliation(instance, 0))) { +				if (megasas_ld_list_query(instance, +							  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) +					megasas_get_ld_list(instance); +				for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { +					for (j = 0; +					     j < MEGASAS_MAX_DEV_PER_CHANNEL; +					     j++) { + +						ld_index = +							(i * 
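
The detach path above frees each SR-IOV buffer with the same size it was allocated with, which is the invariant pci_free_consistent() requires. A small wrapper sketch that keeps pointer, bus handle, and length together (names hypothetical):

#include <linux/pci.h>
#include <linux/errno.h>

struct coherent_buf {
	void *cpu;		/* driver-visible pointer */
	dma_addr_t bus;		/* handle handed to the firmware */
	size_t len;
};

static int coherent_buf_alloc(struct pci_dev *pdev, struct coherent_buf *b,
			      size_t len)
{
	b->cpu = pci_alloc_consistent(pdev, len, &b->bus);
	b->len = len;
	return b->cpu ? 0 : -ENOMEM;
}

static void coherent_buf_free(struct pci_dev *pdev, struct coherent_buf *b)
{
	if (b->cpu)
		pci_free_consistent(pdev, b->len, b->cpu, b->bus);
	b->cpu = NULL;
}
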
MEGASAS_MAX_DEV_PER_CHANNEL) + j; + +						sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + +						if (instance->ld_ids[ld_index] +						    != 0xff) { +							if (sdev1) +								scsi_device_put(sdev1); +						} else { +							if (sdev1) { +								scsi_remove_device(sdev1); +								scsi_device_put(sdev1); +							} +						}  					}  				} -				} +				doscan = 0;  			} -			doscan = 0;  			break;  		case MR_EVT_LD_CREATED: -			if (megasas_ld_list_query(instance, -					MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) -				megasas_get_ld_list(instance); -			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { -				for (j = 0; -					j < MEGASAS_MAX_DEV_PER_CHANNEL; -					j++) { -					ld_index = -					(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; - -					sdev1 = scsi_device_lookup(host, -						MEGASAS_MAX_PD_CHANNELS + i, -						j, 0); - -					if (instance->ld_ids[ld_index] != -								0xff) { -						if (!sdev1) { -							scsi_add_device(host, -						MEGASAS_MAX_PD_CHANNELS + i, -								j, 0); +			if (!instance->requestorId || +			    (instance->requestorId && +			     megasas_get_ld_vf_affiliation(instance, 0))) { +				if (megasas_ld_list_query(instance, +							  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) +					megasas_get_ld_list(instance); +				for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { +					for (j = 0; +					     j < MEGASAS_MAX_DEV_PER_CHANNEL; +					     j++) { +						ld_index = +							(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; + +						sdev1 = scsi_device_lookup(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); + +						if (instance->ld_ids[ld_index] +						    != 0xff) { +							if (!sdev1) +								scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);  						} -					} -					if (sdev1) { -						scsi_device_put(sdev1); +						if (sdev1) +							scsi_device_put(sdev1);  					}  				} +				doscan = 0;  			} -			doscan = 0;  			break;  		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:  		case MR_EVT_FOREIGN_CFG_IMPORTED: @@ -5592,50 +6138,55 @@ megasas_aen_polling(struct work_struct *work)  	}  	if (doscan) { -		printk(KERN_INFO "scanning ...\n"); -		megasas_get_pd_list(instance); -		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { -			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { -				pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; -				sdev1 = scsi_device_lookup(host, i, j, 0); -				if (instance->pd_list[pd_index].driveState == -							MR_PD_STATE_SYSTEM) { -					if (!sdev1) { -						scsi_add_device(host, i, j, 0); -					} -					if (sdev1) -						scsi_device_put(sdev1); -				} else { -					if (sdev1) { -						scsi_remove_device(sdev1); -						scsi_device_put(sdev1); +		printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n", +		       instance->host->host_no); +		if (megasas_get_pd_list(instance) == 0) { +			for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { +				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { +					pd_index = i*MEGASAS_MAX_DEV_PER_CHANNEL + j; +					sdev1 = scsi_device_lookup(host, i, j, 0); +					if (instance->pd_list[pd_index].driveState == +					    MR_PD_STATE_SYSTEM) { +						if (!sdev1) { +							scsi_add_device(host, i, j, 0); +						} +						if (sdev1) +							scsi_device_put(sdev1); +					} else { +						if (sdev1) { +							scsi_remove_device(sdev1); +							scsi_device_put(sdev1); +						}  					}  				}  			}  		} -		if (megasas_ld_list_query(instance, -					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) -			megasas_get_ld_list(instance); -		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { -			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { -				ld_index = -				(i * 
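
The LD event handlers above share one reconcile shape: walk the channel/target grid, look up what the midlayer already has, and add or remove scsi_device instances until they match the firmware's ID table. A sketch of that loop with illustrative channel math:

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static void reconcile_lds(struct Scsi_Host *host, const u8 *ld_ids,
			  int chan_base, int nchan, int per_chan)
{
	int i, j;

	for (i = 0; i < nchan; i++) {
		for (j = 0; j < per_chan; j++) {
			int idx = i * per_chan + j;
			struct scsi_device *sdev =
				scsi_device_lookup(host, chan_base + i, j, 0);

			if (ld_ids[idx] != 0xff) {
				if (!sdev)	/* new LD: expose it */
					scsi_add_device(host, chan_base + i,
							j, 0);
			} else if (sdev) {	/* gone LD: tear it down */
				scsi_remove_device(sdev);
			}
			if (sdev)		/* drop the lookup ref */
				scsi_device_put(sdev);
		}
	}
}
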
MEGASAS_MAX_DEV_PER_CHANNEL) + j; +		if (!instance->requestorId || +		    (instance->requestorId && +		     megasas_get_ld_vf_affiliation(instance, 0))) { +			if (megasas_ld_list_query(instance, +						  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) +				megasas_get_ld_list(instance); +			for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { +				for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; +				     j++) { +					ld_index = +						(i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; -				sdev1 = scsi_device_lookup(host, -					MEGASAS_MAX_PD_CHANNELS + i, j, 0); -				if (instance->ld_ids[ld_index] != 0xff) { -					if (!sdev1) { -						scsi_add_device(host, -						MEGASAS_MAX_PD_CHANNELS + i, -								j, 0); +					sdev1 = scsi_device_lookup(host, +								   MEGASAS_MAX_PD_CHANNELS + i, j, 0); +					if (instance->ld_ids[ld_index] +					    != 0xff) { +						if (!sdev1) +							scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0); +						else +							scsi_device_put(sdev1);  					} else { -						scsi_device_put(sdev1); -					} -				} else { -					if (sdev1) { -						scsi_remove_device(sdev1); -						scsi_device_put(sdev1); +						if (sdev1) { +							scsi_remove_device(sdev1); +							scsi_device_put(sdev1); +						}  					}  				}  			} diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index e24b6eb645b..081bfff12d0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -143,12 +143,12 @@ u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)  u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)  { -	return map->raidMap.ldSpanMap[ld].ldRaid.targetId; +	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);  } -u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) +u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)  { -	return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]); +	return map->raidMap.ldTgtIdToLd[ldTgtId];  }  static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, @@ -975,7 +975,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,  			regSize += stripSize;  	} -	pRAID_Context->timeoutValue     = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec); +	pRAID_Context->timeoutValue = +		cpu_to_le16(raid->fpIoTimeoutForLd ? +			    raid->fpIoTimeoutForLd : +			    map->raidMap.fpPdIoTimeoutSec);  	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||  		(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))  		pRAID_Context->regLockFlags = (isRead) ? 
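
The two fp.c accessor fixes above encode the general endianness rule: byte-swap exactly where a multi-byte little-endian firmware field is read (targetId is __le16), and never swap a single byte (ldTgtIdToLd entries are u8). A sketch with a hypothetical map layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_span_map {
	__le16 target_id;	/* firmware writes little-endian */
	u8     tgt_to_ld[256];
};

static u16 get_target_id(const struct fw_span_map *m)
{
	return le16_to_cpu(m->target_id);	/* 16-bit field: swap */
}

static u8 target_to_ld(const struct fw_span_map *m, u32 tgt)
{
	return m->tgt_to_ld[tgt];		/* single byte: no swap */
}
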
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f6555921fd7..22600419ae9 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -62,7 +62,8 @@ megasas_complete_cmd(struct megasas_instance *instance,  		     struct megasas_cmd *cmd, u8 alt_status);  int megasas_is_ldio(struct scsi_cmnd *cmd);  int -wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd); +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, +	      int seconds);  void  megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd); @@ -81,6 +82,13 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);  void megaraid_sas_kill_hba(struct megasas_instance *instance);  extern u32 megasas_dbg_lvl; +void megasas_sriov_heartbeat_handler(unsigned long instance_addr); +int megasas_sriov_start_heartbeat(struct megasas_instance *instance, +				  int initial); +void megasas_start_timer(struct megasas_instance *instance, +			struct timer_list *timer, +			 void *fn, unsigned long interval); +extern struct megasas_mgmt_info megasas_mgmt_info;  extern int resetwaittime;  /** @@ -549,12 +557,13 @@ fail_req_desc:   * For polling, MFI requires the cmd_status to be set to 0xFF before posting.   */  int -wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd) +wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, +	int seconds)  {  	int i;  	struct megasas_header *frame_hdr = &cmd->frame->hdr; -	u32 msecs = MFI_POLL_TIMEOUT_SECS * 1000; +	u32 msecs = seconds * 1000;  	/*  	 * Wait for cmd_status to change @@ -585,7 +594,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)  	struct megasas_cmd *cmd;  	u8 ret;  	struct fusion_context *fusion; -	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; +	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;  	int i;  	struct megasas_header *frame_hdr; @@ -644,18 +653,18 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)  	/* Convert capability to LE32 */  	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); -	init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle); +	init_frame->queue_info_new_phys_addr_hi = +		cpu_to_le32(upper_32_bits(ioc_init_handle)); +	init_frame->queue_info_new_phys_addr_lo = +		cpu_to_le32(lower_32_bits(ioc_init_handle));  	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); -	req_desc = -	  (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; - -	req_desc->Words = 0; -	req_desc->MFAIo.RequestFlags = +	req_desc.Words = 0; +	req_desc.MFAIo.RequestFlags =  		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<  		 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); -	cpu_to_le32s((u32 *)&req_desc->MFAIo); -	req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr); +	cpu_to_le32s((u32 *)&req_desc.MFAIo); +	req_desc.Words |= cpu_to_le64(cmd->frame_phys_addr);  	/*  	 * disable the intr before firing the init frame @@ -669,10 +678,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)  			break;  	} -	instance->instancet->fire_cmd(instance, req_desc->u.low, -				      req_desc->u.high, instance->reg_set); +	instance->instancet->fire_cmd(instance, req_desc.u.low, +				      req_desc.u.high, instance->reg_set); -	wait_and_poll(instance, cmd); +	wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);  	frame_hdr = &cmd->frame->hdr;  	if (frame_hdr->cmd_status != 0) { @@ -723,7 +732,7 @@ 
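
The ioc_init hunk above splits the 64-bit IOC INIT address into hi/lo halves with upper_32_bits()/lower_32_bits(), which stays warning-free even when dma_addr_t is only 32 bits wide. A sketch with a hypothetical frame layout:

#include <linux/kernel.h>
#include <linux/types.h>

struct init_frame_addr {
	__le32 addr_lo;
	__le32 addr_hi;
};

static void set_frame_addr(struct init_frame_addr *f, dma_addr_t handle)
{
	f->addr_lo = cpu_to_le32(lower_32_bits(handle));
	f->addr_hi = cpu_to_le32(upper_32_bits(handle));
}
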
megasas_get_ld_map_info(struct megasas_instance *instance)  	if (!fusion) {  		megasas_return_cmd(instance, cmd); -		return 1; +		return -ENXIO;  	}  	dcmd = &cmd->frame->dcmd; @@ -1604,13 +1613,15 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,  			MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;  		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||  			(instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) -			io_request->IoFlags |= -				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; +			io_request->IoFlags |= cpu_to_le16( +				MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);  		cmd->request_desc->SCSIIO.RequestFlags =  			(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<  			 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);  		cmd->request_desc->SCSIIO.DevHandle =  			local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; +		cmd->request_desc->SCSIIO.MSIxIndex = +			instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;  		/*  		 * If the command is for the tape device, set the  		 * FP timeout to the os layer timeout value. @@ -1770,7 +1781,8 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)  	if (index >= instance->max_fw_cmds) {  		printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for " -		       "descriptor\n", index); +		       "descriptor for scsi%d\n", index, +			instance->host->host_no);  		return NULL;  	}  	fusion = instance->ctrl_context; @@ -2038,8 +2050,11 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)  		/* If we didn't complete any commands, check for FW fault */  		fw_state = instance->instancet->read_fw_status_reg(  			instance->reg_set) & MFI_STATE_MASK; -		if (fw_state == MFI_STATE_FAULT) +		if (fw_state == MFI_STATE_FAULT) { +			printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt" +			       "for scsi%d\n", instance->host->host_no);  			schedule_work(&instance->work_init); +		}  	}  	return IRQ_HANDLED; @@ -2210,9 +2225,10 @@ megasas_check_reset_fusion(struct megasas_instance *instance,  }  /* This function waits for outstanding commands on fusion to complete */ -int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance) +int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, +					int iotimeout, int *convert)  { -	int i, outstanding, retval = 0; +	int i, outstanding, retval = 0, hb_seconds_missed = 0;  	u32 fw_state;  	for (i = 0; i < resetwaittime; i++) { @@ -2221,18 +2237,49 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)  			instance->reg_set) & MFI_STATE_MASK;  		if (fw_state == MFI_STATE_FAULT) {  			printk(KERN_WARNING "megasas: Found FW in FAULT state," -			       " will reset adapter.\n"); +			       " will reset adapter scsi%d.\n", +				instance->host->host_no); +			retval = 1; +			goto out; +		} +		/* If SR-IOV VF mode & heartbeat timeout, don't wait */ +		if (instance->requestorId && !iotimeout) {  			retval = 1;  			goto out;  		} +		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */ +		if (instance->requestorId && iotimeout) { +			if (instance->hb_host_mem->HB.fwCounter != +			    instance->hb_host_mem->HB.driverCounter) { +				instance->hb_host_mem->HB.driverCounter = +					instance->hb_host_mem->HB.fwCounter; +				hb_seconds_missed = 0; +			} else { +				hb_seconds_missed++; +				if (hb_seconds_missed == +				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { +					printk(KERN_WARNING "megasas: SR-IOV:" +					       " Heartbeat never completed " +					       " while polling during I/O " +					       " timeout handling for " +			
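
Fast-path requests above now carry an MSIxIndex derived from the submitting CPU, spreading completions across reply queues. The selection itself is one line, sketched here with the caveat that smp_processor_id() assumes a non-preemptible caller, as in queuecommand context:

#include <linux/smp.h>
#include <linux/types.h>

static u8 pick_reply_queue(unsigned int msix_vectors)
{
	/* 0 when MSI-X is off; otherwise hash the submitting CPU. */
	return msix_vectors ? smp_processor_id() % msix_vectors : 0;
}
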
		       "scsi%d.\n", +					       instance->host->host_no); +					       *convert = 1; +					       retval = 1; +					       goto out; +				} +			} +		} +  		outstanding = atomic_read(&instance->fw_outstanding);  		if (!outstanding)  			goto out;  		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {  			printk(KERN_NOTICE "megasas: [%2d]waiting for %d " -			       "commands to complete\n", i, outstanding); +			       "commands to complete for scsi%d\n", i, +			       outstanding, instance->host->host_no);  			megasas_complete_cmd_dpc_fusion(  				(unsigned long)instance);  		} @@ -2241,7 +2288,8 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance)  	if (atomic_read(&instance->fw_outstanding)) {  		printk("megaraid_sas: pending commands remain after waiting, " -		       "will reset adapter.\n"); +		       "will reset adapter scsi%d.\n", +		       instance->host->host_no);  		retval = 1;  	}  out: @@ -2263,10 +2311,34 @@ void  megasas_reset_reply_desc(struct megasas_instance *instance)  		reply_desc->Words = ULLONG_MAX;  } +/* Check for a second path that is currently UP */ +int megasas_check_mpio_paths(struct megasas_instance *instance, +	struct scsi_cmnd *scmd) +{ +	int i, j, retval = (DID_RESET << 16); + +	if (instance->mpio && instance->requestorId) { +		for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++) +			for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++) +				if (megasas_mgmt_info.instance[i] && +				    (megasas_mgmt_info.instance[i] != instance) && +				    megasas_mgmt_info.instance[i]->mpio && +				    megasas_mgmt_info.instance[i]->requestorId +				    && +				    (megasas_mgmt_info.instance[i]->ld_ids[j] +				     == scmd->device->id)) { +					    retval = (DID_NO_CONNECT << 16); +					    goto out; +				} +	} +out: +	return retval; +} +  /* Core fusion reset function */ -int megasas_reset_fusion(struct Scsi_Host *shost) +int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)  { -	int retval = SUCCESS, i, j, retry = 0; +	int retval = SUCCESS, i, j, retry = 0, convert = 0;  	struct megasas_instance *instance;  	struct megasas_cmd_fusion *cmd_fusion;  	struct fusion_context *fusion; @@ -2277,28 +2349,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  	instance = (struct megasas_instance *)shost->hostdata;  	fusion = instance->ctrl_context; +	mutex_lock(&instance->reset_mutex); +  	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {  		printk(KERN_WARNING "megaraid_sas: Hardware critical error, " -		       "returning FAILED.\n"); +		       "returning FAILED for scsi%d.\n", +			instance->host->host_no);  		return FAILED;  	} -	mutex_lock(&instance->reset_mutex); +	if (instance->requestorId && !instance->skip_heartbeat_timer_del) +		del_timer_sync(&instance->sriov_heartbeat_timer);  	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); -	instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; +	instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;  	instance->instancet->disable_intr(instance);  	msleep(1000);  	/* First try waiting for commands to complete */ -	if (megasas_wait_for_outstanding_fusion(instance)) { +	if (megasas_wait_for_outstanding_fusion(instance, iotimeout, +						&convert)) { +		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;  		printk(KERN_WARNING "megaraid_sas: resetting fusion " -		       "adapter.\n"); +		       "adapter scsi%d.\n", instance->host->host_no); +		if (convert) +			iotimeout = 0; +  		/* Now return commands back to the OS */  		for (i = 0 ; i < instance->max_fw_cmds; i++) {  			cmd_fusion = fusion->cmd_list[i];  			if 
(cmd_fusion->scmd) {  				scsi_dma_unmap(cmd_fusion->scmd); -				cmd_fusion->scmd->result = (DID_RESET << 16); +				cmd_fusion->scmd->result = +					megasas_check_mpio_paths(instance, +								 cmd_fusion->scmd);  				cmd_fusion->scmd->scsi_done(cmd_fusion->scmd);  				megasas_return_cmd_fusion(instance, cmd_fusion);  				atomic_dec(&instance->fw_outstanding); @@ -2313,13 +2396,67 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {  			/* Reset not supported, kill adapter */  			printk(KERN_WARNING "megaraid_sas: Reset not supported" -			       ", killing adapter.\n"); +			       ", killing adapter scsi%d.\n", +				instance->host->host_no);  			megaraid_sas_kill_hba(instance); +			instance->skip_heartbeat_timer_del = 1;  			instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;  			retval = FAILED;  			goto out;  		} +		/* Let SR-IOV VF & PF sync up if there was a HB failure */ +		if (instance->requestorId && !iotimeout) { +			msleep(MEGASAS_OCR_SETTLE_TIME_VF); +			/* Look for a late HB update after VF settle time */ +			if (abs_state == MFI_STATE_OPERATIONAL && +			    (instance->hb_host_mem->HB.fwCounter != +			     instance->hb_host_mem->HB.driverCounter)) { +					instance->hb_host_mem->HB.driverCounter = +						instance->hb_host_mem->HB.fwCounter; +					printk(KERN_WARNING "megasas: SR-IOV:" +					       "Late FW heartbeat update for " +					       "scsi%d.\n", +					       instance->host->host_no); +			} else { +				/* In VF mode, first poll for FW ready */ +				for (i = 0; +				     i < (MEGASAS_RESET_WAIT_TIME * 1000); +				     i += 20) { +					status_reg = +						instance->instancet-> +						read_fw_status_reg( +							instance->reg_set); +					abs_state = status_reg & +						MFI_STATE_MASK; +					if (abs_state == MFI_STATE_READY) { +						printk(KERN_WARNING "megasas" +						       ": SR-IOV: FW was found" +						       "to be in ready state " +						       "for scsi%d.\n", +						       instance->host->host_no); +						break; +					} +					msleep(20); +				} +				if (abs_state != MFI_STATE_READY) { +					printk(KERN_WARNING "megasas: SR-IOV: " +					       "FW not in ready state after %d" +					       " seconds for scsi%d, status_reg = " +					       "0x%x.\n", +					       MEGASAS_RESET_WAIT_TIME, +					       instance->host->host_no, +					       status_reg); +					megaraid_sas_kill_hba(instance); +					instance->skip_heartbeat_timer_del = 1; +					instance->adprecovery = +						MEGASAS_HW_CRITICAL_ERROR; +					retval = FAILED; +					goto out; +				} +			} +		} +  		/* Now try to reset the chip */  		for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {  			writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, @@ -2346,7 +2483,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  				readl(&instance->reg_set->fusion_host_diag);  				if (retry++ == 100) {  					printk(KERN_WARNING "megaraid_sas: " -					       "Host diag unlock failed!\n"); +					       "Host diag unlock failed! 
" +					       "for scsi%d\n", +						instance->host->host_no);  					break;  				}  			} @@ -2368,7 +2507,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  				if (retry++ == 1000) {  					printk(KERN_WARNING "megaraid_sas: "  					       "Diag reset adapter never " -					       "cleared!\n"); +					       "cleared for scsi%d!\n", +						instance->host->host_no);  					break;  				}  			} @@ -2390,29 +2530,29 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  			if (abs_state <= MFI_STATE_FW_INIT) {  				printk(KERN_WARNING "megaraid_sas: firmware "  				       "state < MFI_STATE_FW_INIT, state = " -				       "0x%x\n", abs_state); +				       "0x%x for scsi%d\n", abs_state, +					instance->host->host_no);  				continue;  			}  			/* Wait for FW to become ready */  			if (megasas_transition_to_ready(instance, 1)) {  				printk(KERN_WARNING "megaraid_sas: Failed to " -				       "transition controller to ready.\n"); +				       "transition controller to ready " +				       "for scsi%d.\n", +				       instance->host->host_no);  				continue;  			}  			megasas_reset_reply_desc(instance);  			if (megasas_ioc_init_fusion(instance)) {  				printk(KERN_WARNING "megaraid_sas: " -				       "megasas_ioc_init_fusion() failed!\n"); +				       "megasas_ioc_init_fusion() failed!" +				       " for scsi%d\n", +				       instance->host->host_no);  				continue;  			} -			clear_bit(MEGASAS_FUSION_IN_RESET, -				  &instance->reset_flags); -			instance->instancet->enable_intr(instance); -			instance->adprecovery = MEGASAS_HBA_OPERATIONAL; -  			/* Re-fire management commands */  			for (j = 0 ; j < instance->max_fw_cmds; j++) {  				cmd_fusion = fusion->cmd_list[j]; @@ -2422,7 +2562,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  					instance->  					cmd_list[cmd_fusion->sync_cmd_idx];  					if (cmd_mfi->frame->dcmd.opcode == -					    MR_DCMD_LD_MAP_GET_INFO) { +					    cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {  						megasas_return_cmd(instance,  								   cmd_mfi);  						megasas_return_cmd_fusion( @@ -2433,11 +2573,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  							instance,  							cmd_mfi->context.smid  							-1); -						if (!req_desc) +						if (!req_desc) {  							printk(KERN_WARNING  							       "req_desc NULL" -							       "\n"); -						else { +							       " for scsi%d\n", +								instance->host->host_no); +							/* Return leaked MPT +							   frame */ +							megasas_return_cmd_fusion(instance, cmd_fusion); +						} else {  							instance->instancet->  							fire_cmd(instance,  								 req_desc-> @@ -2451,6 +2595,11 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  				}  			} +			clear_bit(MEGASAS_FUSION_IN_RESET, +				  &instance->reset_flags); +			instance->instancet->enable_intr(instance); +			instance->adprecovery = MEGASAS_HBA_OPERATIONAL; +  			/* Reset load balance info */  			memset(fusion->load_balance_info, 0,  			       sizeof(struct LD_LOAD_BALANCE_INFO) @@ -2459,18 +2608,39 @@ int megasas_reset_fusion(struct Scsi_Host *shost)  			if (!megasas_get_map_info(instance))  				megasas_sync_map_info(instance); +			/* Restart SR-IOV heartbeat */ +			if (instance->requestorId) { +				if (!megasas_sriov_start_heartbeat(instance, 0)) +					megasas_start_timer(instance, +							    &instance->sriov_heartbeat_timer, +							    megasas_sriov_heartbeat_handler, +							    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); +				else +					instance->skip_heartbeat_timer_del = 1; +			} +  			/* Adapter reset completed successfully */  			printk(KERN_WARNING 
"megaraid_sas: Reset " -			       "successful.\n"); +			       "successful for scsi%d.\n", +				instance->host->host_no);  			retval = SUCCESS;  			goto out;  		}  		/* Reset failed, kill the adapter */  		printk(KERN_WARNING "megaraid_sas: Reset failed, killing " -		       "adapter.\n"); +		       "adapter scsi%d.\n", instance->host->host_no);  		megaraid_sas_kill_hba(instance); +		instance->skip_heartbeat_timer_del = 1; +		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;  		retval = FAILED;  	} else { +		/* For VF: Restart HB timer if we didn't OCR */ +		if (instance->requestorId) { +			megasas_start_timer(instance, +					    &instance->sriov_heartbeat_timer, +					    megasas_sriov_heartbeat_handler, +					    MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); +		}  		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);  		instance->instancet->enable_intr(instance);  		instance->adprecovery = MEGASAS_HBA_OPERATIONAL; @@ -2487,7 +2657,7 @@ void megasas_fusion_ocr_wq(struct work_struct *work)  	struct megasas_instance *instance =  		container_of(work, struct megasas_instance, work_init); -	megasas_reset_fusion(instance->host); +	megasas_reset_fusion(instance->host, 0);  }  struct megasas_instance_template megasas_instance_template_fusion = { diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 35a51397b36..e76af5459a0 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -485,6 +485,9 @@ struct MPI2_IOC_INIT_REQUEST {  #define MAX_PHYSICAL_DEVICES 256  #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)  #define MR_DCMD_LD_MAP_GET_INFO             0x0300e101 +#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC  0x010e8485   /* SR-IOV HB alloc*/ +#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111   0x03200200 +#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS       0x03150200  struct MR_DEV_HANDLE_INFO {  	u16     curDevHdl; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 3901edc3581..8b88118e20e 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -128,7 +128,7 @@ static int mpt2sas_remove_dead_ioc_func(void *arg)  		pdev = ioc->pdev;  		if ((pdev == NULL))  			return -1; -		pci_stop_and_remove_bus_device(pdev); +		pci_stop_and_remove_bus_device_locked(pdev);  		return 0;  } @@ -1739,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)  			list_for_each_entry_safe(chain_req, next,  			    &ioc->scsi_lookup[i].chain_list, tracker_list) {  				list_del_init(&chain_req->tracker_list); -				list_add_tail(&chain_req->tracker_list, +				list_add(&chain_req->tracker_list,  				    &ioc->free_chain_list);  			}  		}  		ioc->scsi_lookup[i].cb_idx = 0xFF;  		ioc->scsi_lookup[i].scmd = NULL;  		ioc->scsi_lookup[i].direct_io = 0; -		list_add_tail(&ioc->scsi_lookup[i].tracker_list, +		list_add(&ioc->scsi_lookup[i].tracker_list,  		    &ioc->free_list);  		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); @@ -1764,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)  		/* hi-priority */  		i = smid - ioc->hi_priority_smid;  		ioc->hpr_lookup[i].cb_idx = 0xFF; -		list_add_tail(&ioc->hpr_lookup[i].tracker_list, +		list_add(&ioc->hpr_lookup[i].tracker_list,  		    &ioc->hpr_free_list);  	} else if (smid <= ioc->hba_queue_depth) {  		/* internal queue */  		i = smid - ioc->internal_smid;  		ioc->internal_lookup[i].cb_idx = 0xFF; -		list_add_tail(&ioc->internal_lookup[i].tracker_list, +	
	list_add(&ioc->internal_lookup[i].tracker_list,  		    &ioc->internal_free_list);  	}  	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 1f2ac3a2862..fd3b998c75b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -1065,7 +1065,7 @@ void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,      u32 reply);  int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,  	uint channel, uint id, uint lun, u8 type, u16 smid_task, -	ulong timeout, unsigned long serial_number, enum mutex_type m_type); +	ulong timeout, enum mutex_type m_type);  void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);  void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);  void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address); diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index b7f887c9b0b..62df8f9d427 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -987,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,  			mpt2sas_scsih_issue_tm(ioc,  			    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,  			    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10, -			    0, TM_MUTEX_ON); +			    TM_MUTEX_ON);  			ioc->tm_cmds.status = MPT2_CMD_NOT_USED;  		} else  			mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP, diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 7f0af4fcc00..5055f925d2c 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2368,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)   * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)   * @smid_task: smid assigned to the task   * @timeout: timeout in seconds - * @serial_number: the serial_number from scmd   * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF   * Context: user   * @@ -2381,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)  int  mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,      uint id, uint lun, u8 type, u16 smid_task, ulong timeout, -	unsigned long serial_number, enum mutex_type m_type) +	enum mutex_type m_type)  {  	Mpi2SCSITaskManagementRequest_t *mpi_request;  	Mpi2SCSITaskManagementReply_t *mpi_reply; @@ -2634,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd)  	handle = sas_device_priv_data->sas_target->handle;  	r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,  	    scmd->device->id, scmd->device->lun, -	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, -	    scmd->serial_number, TM_MUTEX_ON); +	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);   out:  	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", @@ -2696,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)  	r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,  	    scmd->device->id, scmd->device->lun, -	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, -	    TM_MUTEX_ON); +	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);   out:  	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", @@ -2757,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)  	r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,  	    scmd->device->id, 0, 
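
The mpt2sas list_add() changes above switch the free lists from FIFO to LIFO: a just-freed tracker goes back on the head, so the next allocation reuses the cache-warm entry instead of cycling through the whole pool. Sketch:

#include <linux/list.h>

struct tracker {
	struct list_head node;
	/* ... lookup state ... */
};

static void tracker_free(struct tracker *t, struct list_head *free_list)
{
	list_add(&t->node, free_list);		/* LIFO: head insert */
}

static struct tracker *tracker_alloc(struct list_head *free_list)
{
	struct tracker *t = list_first_entry_or_null(free_list,
						     struct tracker, node);
	if (t)
		list_del_init(&t->node);
	return t;
}
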
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, -	    30, 0, TM_MUTEX_ON); +	    30, TM_MUTEX_ON);   out:  	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", @@ -3953,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,   * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full   */  static int -_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)  { -	struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); +	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);  	struct MPT2SAS_DEVICE *sas_device_priv_data;  	struct MPT2SAS_TARGET *sas_target_priv_data;  	struct _raid_device *raid_device; @@ -3963,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))  	u32 mpi_control;  	u16 smid; -	scmd->scsi_done = done;  	sas_device_priv_data = scmd->device->hostdata;  	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {  		scmd->result = DID_NO_CONNECT << 16; @@ -4039,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))  	    MPT_TARGET_FLAGS_RAID_COMPONENT)  		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;  	else -		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; +	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;  	mpi_request->DevHandle =  	    cpu_to_le16(sas_device_priv_data->sas_target->handle);  	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); @@ -4083,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))  	return SCSI_MLQUEUE_HOST_BUSY;  } -static DEF_SCSI_QCMD(_scsih_qcmd) -  /**   * _scsih_normalize_sense - normalize descriptor and fixed format sense data   * @sense_buffer: sense data returned by target @@ -5880,7 +5874,7 @@ broadcast_aen_retry:  		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);  		r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun, -		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, +		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,  		    TM_MUTEX_OFF);  		if (r == FAILED) {  			sdev_printk(KERN_WARNING, sdev, @@ -5922,7 +5916,7 @@ broadcast_aen_retry:  		r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,  		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, -		    scmd->serial_number, TM_MUTEX_OFF); +		    TM_MUTEX_OFF);  		if (r == FAILED) {  			sdev_printk(KERN_WARNING, sdev,  			    "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : " @@ -8293,7 +8287,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)  	mpt2sas_base_free_resources(ioc);  	pci_save_state(pdev); -	pci_disable_device(pdev);  	pci_set_power_state(pdev, device_state);  	return 0;  } diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 9d26637308b..410f4a3e888 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);  	Mpi2SmpPassthroughRequest_t *mpi_request;  	Mpi2SmpPassthroughReply_t *mpi_reply; -	int rc, i; +	int rc;  	u16 smid;  	u32 ioc_state;  	unsigned long timeleft; @@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	void *pci_addr_out = NULL;  	u16 wait_state_count;  	struct request *rsp = req->next_rq; -	struct bio_vec *bvec = NULL; +	struct bio_vec bvec; +	struct bvec_iter iter;  	if 
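The conversion from _scsih_qcmd_lck() plus DEF_SCSI_QCMD to a plain _scsih_qcmd() is the standard host-lock-free queuecommand migration: the wrapper the macro generated took shost->host_lock around every submission, which is also where scmd->serial_number was maintained, hence the serial_number parameter deletions threaded through the task-management paths above. A userspace analog of the wrapper being removed, with a pthread mutex standing in for the host lock (names are illustrative, not the SCSI midlayer's):

    #include <pthread.h>
    #include <stdio.h>

    struct host { pthread_mutex_t lock; };
    struct cmd { int id; };

    /* the real work, callable with no lock held */
    static int queue_cmd_lck(struct cmd *c)
    {
        printf("queued cmd %d\n", c->id);
        return 0;
    }

    /* old style: every submission serialized on the per-host lock */
    static int queue_cmd_locked(struct host *h, struct cmd *c)
    {
        pthread_mutex_lock(&h->lock);
        int rc = queue_cmd_lck(c);
        pthread_mutex_unlock(&h->lock);
        return rc;
    }

    int main(void)
    {
        struct host h = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct cmd c = { .id = 42 };
        queue_cmd_locked(&h, &c);  /* old path */
        queue_cmd_lck(&c);         /* new, lock-free path */
        return 0;
    }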
(!rsp) {  		printk(MPT2SAS_ERR_FMT "%s: the smp response space is " @@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	ioc->transport_cmds.status = MPT2_CMD_PENDING;  	/* Check if the request is split across multiple segments */ -	if (bio_segments(req->bio) > 1) { +	if (bio_multiple_segments(req->bio)) {  		u32 offset = 0;  		/* Allocate memory and copy the request */ @@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  			goto out;  		} -		bio_for_each_segment(bvec, req->bio, i) { +		bio_for_each_segment(bvec, req->bio, iter) {  			memcpy(pci_addr_out + offset, -			    page_address(bvec->bv_page) + bvec->bv_offset, -			    bvec->bv_len); -			offset += bvec->bv_len; +			    page_address(bvec.bv_page) + bvec.bv_offset, +			    bvec.bv_len); +			offset += bvec.bv_len;  		}  	} else {  		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	/* Check if the response needs to be populated across  	 * multiple segments */ -	if (bio_segments(rsp->bio) > 1) { +	if (bio_multiple_segments(rsp->bio)) {  		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),  		    &pci_dma_in);  		if (!pci_addr_in) { @@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |  	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);  	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; -	if (bio_segments(req->bio) > 1) { +	if (bio_multiple_segments(req->bio)) {  		ioc->base_add_sg_single(psge, sgl_flags |  		    (blk_rq_bytes(req) - 4), pci_dma_out);  	} else { @@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |  	    MPI2_SGE_FLAGS_END_OF_LIST);  	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; -	if (bio_segments(rsp->bio) > 1) { +	if (bio_multiple_segments(rsp->bio)) {  		ioc->base_add_sg_single(psge, sgl_flags |  		    (blk_rq_bytes(rsp) + 4), pci_dma_in);  	} else { @@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  		    le16_to_cpu(mpi_reply->ResponseDataLength);  		/* check if the resp needs to be copied from the allocated  		 * pci mem */ -		if (bio_segments(rsp->bio) > 1) { +		if (bio_multiple_segments(rsp->bio)) {  			u32 offset = 0;  			u32 bytes_to_copy =  			    le16_to_cpu(mpi_reply->ResponseDataLength); -			bio_for_each_segment(bvec, rsp->bio, i) { -				if (bytes_to_copy <= bvec->bv_len) { -					memcpy(page_address(bvec->bv_page) + -					    bvec->bv_offset, pci_addr_in + +			bio_for_each_segment(bvec, rsp->bio, iter) { +				if (bytes_to_copy <= bvec.bv_len) { +					memcpy(page_address(bvec.bv_page) + +					    bvec.bv_offset, pci_addr_in +  					    offset, bytes_to_copy);  					break;  				} else { -					memcpy(page_address(bvec->bv_page) + -					    bvec->bv_offset, pci_addr_in + -					    offset, bvec->bv_len); -					bytes_to_copy -= bvec->bv_len; +					memcpy(page_address(bvec.bv_page) + +					    bvec.bv_offset, pci_addr_in + +					    offset, bvec.bv_len); +					bytes_to_copy -= bvec.bv_len;  				} -				offset += bvec->bv_len; +				offset += bvec.bv_len;  			}  		}  	} else { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index fa785062e97..0cf4f7000f9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ 
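Both copy loops above are converted for the block layer's immutable-biovec interface: bio_for_each_segment() now yields a struct bio_vec by value and keeps its cursor in a separate struct bvec_iter, instead of handing out pointers indexed by an int, which is also why bio_multiple_segments() replaces the bio_segments() comparison. A userspace analog of the by-value iteration pattern (illustrative types, not the kernel's <linux/bvec.h>):

    #include <stdio.h>
    #include <string.h>

    struct seg { const char *base; unsigned int len; };

    /* iterator state lives in a separate cursor, and each step yields
     * a value copy of the segment descriptor */
    struct seg_iter { unsigned int idx; };

    #define seg_for_each(v, segs, n, it) \
        for ((it).idx = 0; \
             (it).idx < (n) && ((v) = (segs)[(it).idx], 1); \
             (it).idx++)

    int main(void)
    {
        struct seg segs[] = { { "SMP ", 4 }, { "frame", 5 } };
        char flat[16] = { 0 };
        unsigned int off = 0;
        struct seg v;
        struct seg_iter it;

        /* mirror of the driver's loop: flatten a multi-segment
         * request into one contiguous DMA-able buffer */
        seg_for_each(v, segs, 2, it) {
            memcpy(flat + off, v.base, v.len);
            off += v.len;
        }
        printf("%s\n", flat);
        return 0;
    }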
b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -131,7 +131,7 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)  	pdev = ioc->pdev;  	if ((pdev == NULL))  		return -1; -	pci_stop_and_remove_bus_device(pdev); +	pci_stop_and_remove_bus_device_locked(pdev);  	return 0;  } diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 0ebf5d913c8..9b90a6fef70 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);  int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,  	uint channel, uint id, uint lun, u8 type, u16 smid_task, -	ulong timeout, unsigned long serial_number,  enum mutex_type m_type); +	ulong timeout, enum mutex_type m_type);  void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);  void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);  void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 9b89de14a0a..ba9cbe598a9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -980,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,  			mpt3sas_scsih_issue_tm(ioc,  			    le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,  			    0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, -			    0, TM_MUTEX_ON); +			    TM_MUTEX_ON);  		} else  			mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,  			    FORCE_BIG_HAMMER); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index a961fe11b52..18e713db1d3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2029,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)   * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)   * @smid_task: smid assigned to the task   * @timeout: timeout in seconds - * @serial_number: the serial_number from scmd   * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF   * Context: user   * @@ -2042,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)  int  mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,  	uint id, uint lun, u8 type, u16 smid_task, ulong timeout, -	unsigned long serial_number, enum mutex_type m_type) +	enum mutex_type m_type)  {  	Mpi2SCSITaskManagementRequest_t *mpi_request;  	Mpi2SCSITaskManagementReply_t *mpi_reply; @@ -2293,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd)  	handle = sas_device_priv_data->sas_target->handle;  	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,  	    scmd->device->id, scmd->device->lun, -	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, -	    scmd->serial_number, TM_MUTEX_ON); +	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);   out:  	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", @@ -2353,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)  	r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,  	    scmd->device->id, scmd->device->lun, -	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, -	    TM_MUTEX_ON); +	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);   out:  	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", @@ -2414,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)  	r = 
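Both mpt2sas and mpt3sas now remove a dead IOC with pci_stop_and_remove_bus_device_locked(). The PCI core made the unadorned pci_stop_and_remove_bus_device() require the rescan/remove lock, and the _locked variant exists for callers like these that do not already hold it. A sketch of what the wrapper adds, reconstructed from the API's naming rather than quoted from drivers/pci:

    /* hedged sketch: serializes device removal against concurrent
     * rescans for callers not already under the rescan/remove lock */
    void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev)
    {
        pci_lock_rescan_remove();
        pci_stop_and_remove_bus_device(dev);
        pci_unlock_rescan_remove();
    }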
mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,  	    scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, -	    30, 0, TM_MUTEX_ON); +	    30, TM_MUTEX_ON);   out:  	starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", @@ -3518,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)  /** - * _scsih_qcmd_lck - main scsi request entry point + * _scsih_qcmd - main scsi request entry point   * @scmd: pointer to scsi command object   * @done: function pointer to be invoked on completion   * @@ -3529,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)   * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full   */  static int -_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)  { -	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); +	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);  	struct MPT3SAS_DEVICE *sas_device_priv_data;  	struct MPT3SAS_TARGET *sas_target_priv_data;  	Mpi2SCSIIORequest_t *mpi_request; @@ -3544,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))  		scsi_print_command(scmd);  #endif -	scmd->scsi_done = done;  	sas_device_priv_data = scmd->device->hostdata;  	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {  		scmd->result = DID_NO_CONNECT << 16; @@ -3659,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))   out:  	return SCSI_MLQUEUE_HOST_BUSY;  } -static DEF_SCSI_QCMD(_scsih_qcmd) -  /**   * _scsih_normalize_sense - normalize descriptor and fixed format sense data @@ -5425,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,  		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);  		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, -		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, +		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,  		    TM_MUTEX_OFF);  		if (r == FAILED) {  			sdev_printk(KERN_WARNING, sdev, @@ -5467,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,  		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,  		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, -		    scmd->serial_number, TM_MUTEX_OFF); +		    TM_MUTEX_OFF);  		if (r == FAILED) {  			sdev_printk(KERN_WARNING, sdev,  			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index e771a88c6a7..65170cb1a00 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);  	Mpi2SmpPassthroughRequest_t *mpi_request;  	Mpi2SmpPassthroughReply_t *mpi_reply; -	int rc, i; +	int rc;  	u16 smid;  	u32 ioc_state;  	unsigned long timeleft; @@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	void *pci_addr_out = NULL;  	u16 wait_state_count;  	struct request *rsp = req->next_rq; -	struct bio_vec *bvec = NULL; +	struct bio_vec bvec; +	struct bvec_iter iter;  	if (!rsp) {  		pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", @@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	ioc->transport_cmds.status = MPT3_CMD_PENDING;  	/* Check if the request is split across multiple segments 
*/ -	if (req->bio->bi_vcnt > 1) { +	if (bio_multiple_segments(req->bio)) {  		u32 offset = 0;  		/* Allocate memory and copy the request */ @@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  			goto out;  		} -		bio_for_each_segment(bvec, req->bio, i) { +		bio_for_each_segment(bvec, req->bio, iter) {  			memcpy(pci_addr_out + offset, -			    page_address(bvec->bv_page) + bvec->bv_offset, -			    bvec->bv_len); -			offset += bvec->bv_len; +			    page_address(bvec.bv_page) + bvec.bv_offset, +			    bvec.bv_len); +			offset += bvec.bv_len;  		}  	} else {  		dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), @@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	/* Check if the response needs to be populated across  	 * multiple segments */ -	if (rsp->bio->bi_vcnt > 1) { +	if (bio_multiple_segments(rsp->bio)) {  		pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),  		    &pci_dma_in);  		if (!pci_addr_in) { @@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  	mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);  	psge = &mpi_request->SGL; -	if (req->bio->bi_vcnt > 1) +	if (bio_multiple_segments(req->bio))  		ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),  		    pci_dma_in, (blk_rq_bytes(rsp) + 4));  	else @@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,  		/* check if the resp needs to be copied from the allocated  		 * pci mem */ -		if (rsp->bio->bi_vcnt > 1) { +		if (bio_multiple_segments(rsp->bio)) {  			u32 offset = 0;  			u32 bytes_to_copy =  			    le16_to_cpu(mpi_reply->ResponseDataLength); -			bio_for_each_segment(bvec, rsp->bio, i) { -				if (bytes_to_copy <= bvec->bv_len) { -					memcpy(page_address(bvec->bv_page) + -					    bvec->bv_offset, pci_addr_in + +			bio_for_each_segment(bvec, rsp->bio, iter) { +				if (bytes_to_copy <= bvec.bv_len) { +					memcpy(page_address(bvec.bv_page) + +					    bvec.bv_offset, pci_addr_in +  					    offset, bytes_to_copy);  					break;  				} else { -					memcpy(page_address(bvec->bv_page) + -					    bvec->bv_offset, pci_addr_in + -					    offset, bvec->bv_len); -					bytes_to_copy -= bvec->bv_len; +					memcpy(page_address(bvec.bv_page) + +					    bvec.bv_offset, pci_addr_in + +					    offset, bvec.bv_len); +					bytes_to_copy -= bvec.bv_len;  				} -				offset += bvec->bv_len; +				offset += bvec.bv_len;  			}  		}  	} else { diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 1e4479f3331..9270d15ff1a 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)  	u32 tmp;  	tmp = mr32(MVS_GBL_CTL); -	tmp |= (IRQ_SAS_A | IRQ_SAS_B); +	tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);  	mw32(MVS_GBL_INT_STAT, tmp);  	writel(tmp, regs + 0x0C);  	writel(tmp, regs + 0x10); @@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)  	tmp = mr32(MVS_GBL_CTL); -	tmp &= ~(IRQ_SAS_A | IRQ_SAS_B); +	tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);  	mw32(MVS_GBL_INT_STAT, tmp);  	writel(tmp, regs + 0x0C);  	writel(tmp, regs + 0x10); @@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)  	if (!(mvi->flags & MVF_FLAG_SOC)) {  		stat = mr32(MVS_GBL_INT_STAT); -		if (!(stat & (IRQ_SAS_A | IRQ_SAS_B))) +		if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)))  			return 0;  	}  	return 
stat; @@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)  {  	void __iomem *regs = mvi->regs; -	if (((stat & IRQ_SAS_A) && mvi->id == 0) || -			((stat & IRQ_SAS_B) && mvi->id == 1)) { +	if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) || +			((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) {  		mw32_f(MVS_INT_STAT, CINT_DONE);  		spin_lock(&mvi->lock); diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h index 487aa6f9741..14e197497b4 100644 --- a/drivers/scsi/mvsas/mv_94xx.h +++ b/drivers/scsi/mvsas/mv_94xx.h @@ -150,35 +150,35 @@ enum chip_register_bits {  enum pci_interrupt_cause {  	/*  MAIN_IRQ_CAUSE (R10200) Bits*/ -	IRQ_COM_IN_I2O_IOP0            = (1 << 0), -	IRQ_COM_IN_I2O_IOP1            = (1 << 1), -	IRQ_COM_IN_I2O_IOP2            = (1 << 2), -	IRQ_COM_IN_I2O_IOP3            = (1 << 3), -	IRQ_COM_OUT_I2O_HOS0           = (1 << 4), -	IRQ_COM_OUT_I2O_HOS1           = (1 << 5), -	IRQ_COM_OUT_I2O_HOS2           = (1 << 6), -	IRQ_COM_OUT_I2O_HOS3           = (1 << 7), -	IRQ_PCIF_TO_CPU_DRBL0          = (1 << 8), -	IRQ_PCIF_TO_CPU_DRBL1          = (1 << 9), -	IRQ_PCIF_TO_CPU_DRBL2          = (1 << 10), -	IRQ_PCIF_TO_CPU_DRBL3          = (1 << 11), -	IRQ_PCIF_DRBL0                 = (1 << 12), -	IRQ_PCIF_DRBL1                 = (1 << 13), -	IRQ_PCIF_DRBL2                 = (1 << 14), -	IRQ_PCIF_DRBL3                 = (1 << 15), -	IRQ_XOR_A                      = (1 << 16), -	IRQ_XOR_B                      = (1 << 17), -	IRQ_SAS_A                      = (1 << 18), -	IRQ_SAS_B                      = (1 << 19), -	IRQ_CPU_CNTRL                  = (1 << 20), -	IRQ_GPIO                       = (1 << 21), -	IRQ_UART                       = (1 << 22), -	IRQ_SPI                        = (1 << 23), -	IRQ_I2C                        = (1 << 24), -	IRQ_SGPIO                      = (1 << 25), -	IRQ_COM_ERR                    = (1 << 29), -	IRQ_I2O_ERR                    = (1 << 30), -	IRQ_PCIE_ERR                   = (1 << 31), +	MVS_IRQ_COM_IN_I2O_IOP0        = (1 << 0), +	MVS_IRQ_COM_IN_I2O_IOP1        = (1 << 1), +	MVS_IRQ_COM_IN_I2O_IOP2        = (1 << 2), +	MVS_IRQ_COM_IN_I2O_IOP3        = (1 << 3), +	MVS_IRQ_COM_OUT_I2O_HOS0       = (1 << 4), +	MVS_IRQ_COM_OUT_I2O_HOS1       = (1 << 5), +	MVS_IRQ_COM_OUT_I2O_HOS2       = (1 << 6), +	MVS_IRQ_COM_OUT_I2O_HOS3       = (1 << 7), +	MVS_IRQ_PCIF_TO_CPU_DRBL0      = (1 << 8), +	MVS_IRQ_PCIF_TO_CPU_DRBL1      = (1 << 9), +	MVS_IRQ_PCIF_TO_CPU_DRBL2      = (1 << 10), +	MVS_IRQ_PCIF_TO_CPU_DRBL3      = (1 << 11), +	MVS_IRQ_PCIF_DRBL0             = (1 << 12), +	MVS_IRQ_PCIF_DRBL1             = (1 << 13), +	MVS_IRQ_PCIF_DRBL2             = (1 << 14), +	MVS_IRQ_PCIF_DRBL3             = (1 << 15), +	MVS_IRQ_XOR_A                  = (1 << 16), +	MVS_IRQ_XOR_B                  = (1 << 17), +	MVS_IRQ_SAS_A                  = (1 << 18), +	MVS_IRQ_SAS_B                  = (1 << 19), +	MVS_IRQ_CPU_CNTRL              = (1 << 20), +	MVS_IRQ_GPIO                   = (1 << 21), +	MVS_IRQ_UART                   = (1 << 22), +	MVS_IRQ_SPI                    = (1 << 23), +	MVS_IRQ_I2C                    = (1 << 24), +	MVS_IRQ_SGPIO                  = (1 << 25), +	MVS_IRQ_COM_ERR                = (1 << 29), +	MVS_IRQ_I2O_ERR                = (1 << 30), +	MVS_IRQ_PCIE_ERR               = (1 << 31),  };  union reg_phy_cfg { diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 7b7381d7671..eacee48a955 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -657,7 +657,6 @@ 
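The mvsas rename is pure namespacing: generic identifiers like IRQ_SAS_A in a driver header risk colliding with same-named constants from other headers in the same build, so every driver-local interrupt-cause bit gains an MVS_ prefix while keeping its value. The pattern in miniature:

    #include <stdio.h>

    /* driver-local bits carry a driver prefix; values are unchanged
     * (these two match the hunk above) */
    enum mvs_pci_interrupt_cause {
        MVS_IRQ_SAS_A = (1 << 18),
        MVS_IRQ_SAS_B = (1 << 19),
    };

    int main(void)
    {
        unsigned int stat = MVS_IRQ_SAS_A;

        if (stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B))
            printf("SAS interrupt pending: 0x%08x\n", stat);
        return 0;
    }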
static void mvs_pci_remove(struct pci_dev *pdev)  	tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);  #endif -	pci_set_drvdata(pdev, NULL);  	sas_unregister_ha(sha);  	sas_remove_host(mvi->shost);  	scsi_remove_host(mvi->shost); @@ -729,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = {  		.class_mask	= 0,  		.driver_data	= chip_9485,  	}, +	{ +		.vendor		= PCI_VENDOR_ID_MARVELL_EXT, +		.device		= 0x9485, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= 0x9485, +		.class		= 0, +		.class_mask	= 0, +		.driver_data	= chip_9485, +	},  	{ PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */  	{ PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */  	{ PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */ diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 6b1b4e91e53..6c1f223a8e1 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1411,7 +1411,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,  		if (res) {  			del_timer(&task->slow_task->timer); -			mv_printk("executing internel task failed:%d\n", res); +			mv_printk("executing internal task failed:%d\n", res);  			goto ex_err;  		} diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c index c3601b57a80..edbee8dc62c 100644 --- a/drivers/scsi/mvumi.c +++ b/drivers/scsi/mvumi.c @@ -2583,7 +2583,6 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	return 0;  fail_io_attach: -	pci_set_drvdata(pdev, NULL);  	mhba->instancet->disable_intr(mhba);  	free_irq(mhba->pdev->irq, mhba);  fail_init_irq: @@ -2618,7 +2617,6 @@ static void mvumi_detach_one(struct pci_dev *pdev)  	free_irq(mhba->pdev->irq, mhba);  	mvumi_release_fw(mhba);  	scsi_host_put(host); -	pci_set_drvdata(pdev, NULL);  	pci_disable_device(pdev);  	dev_dbg(&pdev->dev, "driver is removed!\n");  } diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index 5982a587bab..7d014b11df6 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -1615,7 +1615,7 @@ struct ncb {  	spinlock_t	smp_lock;	/* Lock for SMP threading       */  	/*---------------------------------------------------------------- -	**	Chip and controller indentification. +	**	Chip and controller identification.  	
**----------------------------------------------------------------  	*/  	int		unit;		/* Unit number			*/ diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index aa66361ed44..5f4cbf0c475 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,  	bio->bi_rw &= ~REQ_WRITE;  	or->in.bio = bio; -	or->in.total_bytes = bio->bi_size; +	or->in.total_bytes = bio->bi_iter.bi_size;  	return 0;  } @@ -1570,6 +1570,7 @@ static struct request *_make_request(struct request_queue *q, bool has_write,  		if (unlikely(!req))  			return ERR_PTR(-ENOMEM); +		blk_rq_set_block_pc(req);  		return req;  	}  } @@ -1590,7 +1591,6 @@ static int _init_blk_request(struct osd_request *or,  	}  	or->request = req; -	req->cmd_type = REQ_TYPE_BLOCK_PC;  	req->cmd_flags |= REQ_QUIET;  	req->timeout = or->timeout; @@ -1608,7 +1608,7 @@ static int _init_blk_request(struct osd_request *or,  				ret = PTR_ERR(req);  				goto out;  			} -			req->cmd_type = REQ_TYPE_BLOCK_PC; +			blk_rq_set_block_pc(req);  			or->in.req = or->request->next_rq = req;  		}  	} else if (has_in) diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 21883a2d632..0727ea7cc38 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -365,7 +365,7 @@ static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,  	if (!req)  		return DRIVER_ERROR << 24; -	req->cmd_type = REQ_TYPE_BLOCK_PC; +	blk_rq_set_block_pc(req);  	req->cmd_flags |= REQ_QUIET;  	SRpnt->bio = NULL; diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c index 62f1a603176..0d78a4d5576 100644 --- a/drivers/scsi/pas16.c +++ b/drivers/scsi/pas16.c @@ -453,7 +453,7 @@ int __init pas16_detect(struct scsi_host_template * tpnt)  	    instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);  	if (instance->irq != SCSI_IRQ_NONE)  -	    if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, +	    if (request_irq(instance->irq, pas16_intr, 0,  			    "pas16", instance)) {  		printk("scsi%d : IRQ%d not free, interrupts disabled\n",   		    instance->host_no, instance->irq); diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index 3721342835e..aa528f53c53 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h @@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *);  #define CAN_QUEUE 32   #endif -#ifndef HOSTS_C -  #define NCR5380_implementation_fields \      volatile unsigned short io_port @@ -171,6 +169,5 @@ static int pas16_bus_reset(Scsi_Cmnd *);  #define PAS16_IRQS 0xd4a8  -#endif /* else def HOSTS_C */  #endif /* ndef ASM */  #endif /* PAS16_H */ diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index d99f41c2ca1..a368d77b8d4 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -309,6 +309,106 @@ static ssize_t pm8001_ctl_aap_log_show(struct device *cdev,  }  static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL);  /** + * pm8001_ctl_ib_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @buf: the buffer returned + * A sysfs 'read-only' shost attribute. 
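The osd and osst hunks replace the bare req->cmd_type = REQ_TYPE_BLOCK_PC assignment with blk_rq_set_block_pc(), a block-layer helper that performs the full BLOCK_PC initialization in one place instead of leaving each submitter to poke whichever fields it remembers. A userspace sketch of the same constructor-helper idea (illustrative types, not <linux/blkdev.h>):

    #include <stdio.h>
    #include <string.h>

    enum req_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC };

    struct request {
        enum req_type cmd_type;
        unsigned int  cmd_len;
        unsigned char cmd[16];
    };

    /* one helper owns the whole initialization, so every caller
     * gets the same fully set-up request */
    static void request_set_block_pc(struct request *req)
    {
        req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_len = 0;
        memset(req->cmd, 0, sizeof(req->cmd));
    }

    int main(void)
    {
        struct request req;
        request_set_block_pc(&req);
        printf("cmd_type=%d\n", req.cmd_type);
        return 0;
    }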
+ */ +static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, +	struct device_attribute *attr, char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(cdev); +	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); +	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; +	int offset; +	char *str = buf; +	int start = 0; +#define IB_MEMMAP(c)	\ +		(*(u32 *)((u8 *)pm8001_ha->	\ +		memoryMap.region[IB].virt_ptr +	\ +		pm8001_ha->evtlog_ib_offset + (c))) + +	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { +		str += sprintf(str, "0x%08x\n", IB_MEMMAP(start)); +		start = start + 4; +	} +	pm8001_ha->evtlog_ib_offset += SYSFS_OFFSET; +	if (((pm8001_ha->evtlog_ib_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) +		pm8001_ha->evtlog_ib_offset = 0; + +	return str - buf; +} + +static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); +/** + * pm8001_ctl_ob_queue_log_show - Out bound Queue log + * @cdev:pointer to embedded class device + * @buf: the buffer returned + * A sysfs 'read-only' shost attribute. + */ + +static ssize_t pm8001_ctl_ob_queue_log_show(struct device *cdev, +	struct device_attribute *attr, char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(cdev); +	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); +	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; +	int offset; +	char *str = buf; +	int start = 0; +#define OB_MEMMAP(c)	\ +		(*(u32 *)((u8 *)pm8001_ha->	\ +		memoryMap.region[OB].virt_ptr +	\ +		pm8001_ha->evtlog_ob_offset + (c))) + +	for (offset = 0; offset < IB_OB_READ_TIMES; offset++) { +		str += sprintf(str, "0x%08x\n", OB_MEMMAP(start)); +		start = start + 4; +	} +	pm8001_ha->evtlog_ob_offset += SYSFS_OFFSET; +	if (((pm8001_ha->evtlog_ob_offset) % (PM80XX_IB_OB_QUEUE_SIZE)) == 0) +		pm8001_ha->evtlog_ob_offset = 0; + +	return str - buf; +} +static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); +/** + * pm8001_ctl_bios_version_show - Bios version Display + * @cdev:pointer to embedded class device + * @buf:the buffer returned + * A sysfs 'read-only' shost attribute. 
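The new ib_log and ob_log attributes page through the inbound and outbound queue memory one kilobyte per read: 256 dwords are formatted per call, a persistent offset advances by SYSFS_OFFSET, and the offset wraps to zero at PM80XX_IB_OB_QUEUE_SIZE. A runnable sketch of that windowed, wrapping read:

    #include <stdio.h>

    #define QUEUE_SIZE (32 * 1024)  /* PM80XX_IB_OB_QUEUE_SIZE */
    #define WINDOW     1024         /* SYSFS_OFFSET */
    #define READ_TIMES 256          /* IB_OB_READ_TIMES: dwords per read */

    static unsigned int evtlog_offset;  /* persists across reads */

    static unsigned int dump_window(const unsigned int *queue, char *buf)
    {
        char *p = buf;
        for (int i = 0; i < READ_TIMES; i++)
            p += sprintf(p, "0x%08x\n", queue[evtlog_offset / 4 + i]);
        evtlog_offset += WINDOW;
        if ((evtlog_offset % QUEUE_SIZE) == 0)  /* wrap at ring end */
            evtlog_offset = 0;
        return (unsigned int)(p - buf);
    }

    int main(void)
    {
        static unsigned int queue[QUEUE_SIZE / 4];
        static char buf[READ_TIMES * 12 + 1];
        printf("emitted %u bytes, next offset %u\n",
               dump_window(queue, buf), evtlog_offset);
        return 0;
    }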
+ */ +static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, +	struct device_attribute *attr, char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(cdev); +	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); +	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; +	char *str = buf; +	void *virt_addr; +	int bios_index; +	DECLARE_COMPLETION_ONSTACK(completion); +	struct pm8001_ioctl_payload payload; + +	pm8001_ha->nvmd_completion = &completion; +	payload.minor_function = 7; +	payload.offset = 0; +	payload.length = 4096; +	payload.func_specific = kzalloc(4096, GFP_KERNEL); +	if (!payload.func_specific) +		return -ENOMEM; +	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); +	wait_for_completion(&completion); +	virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; +	for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; +		bios_index++) +		str += sprintf(str, "%c", +			*((u8 *)((u8 *)virt_addr+bios_index))); +	kfree(payload.func_specific); +	return str - buf; +} +static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL); +/**   * pm8001_ctl_aap_log_show - IOP event log   * @cdev: pointer to embedded class device   * @buf: the buffer returned @@ -344,6 +444,43 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,  }  static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); +/** + ** pm8001_ctl_fatal_log_show - fatal error logging + ** @cdev:pointer to embedded class device + ** @buf: the buffer returned + ** + ** A sysfs 'read-only' shost attribute. + **/ + +static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, +	struct device_attribute *attr, char *buf) +{ +	ssize_t count; + +	count = pm80xx_get_fatal_dump(cdev, attr, buf); +	return count; +} + +static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); + + +/** + ** pm8001_ctl_gsm_log_show - gsm dump collection + ** @cdev:pointer to embedded class device + ** @buf: the buffer returned + **A sysfs 'read-only' shost attribute. 
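Alongside the new attributes, update_fw loses its world-writable bit: S_IRUGO|S_IWUGO (0666) becomes S_IRUGO|S_IWUSR|S_IWGRP (0664), since a globally writable sysfs attribute is a privilege-escalation hazard. Verifying the octal values:

    #include <stdio.h>
    #include <sys/stat.h>

    /* kernel-style mode shorthands, spelled out for userspace */
    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
    #define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)

    int main(void)
    {
        printf("old: %04o\n", S_IRUGO | S_IWUGO);            /* 0666 */
        printf("new: %04o\n", S_IRUGO | S_IWUSR | S_IWGRP);  /* 0664 */
        return 0;
    }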
+ **/ +static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, +	struct device_attribute *attr, char *buf) +{ +	ssize_t count; + +	count = pm8001_get_gsm_dump(cdev, SYSFS_OFFSET, buf); +	return count; +} + +static DEVICE_ATTR(gsm_log, S_IRUGO, pm8001_ctl_gsm_log_show, NULL); +  #define FLASH_CMD_NONE      0x00  #define FLASH_CMD_UPDATE    0x01  #define FLASH_CMD_SET_NVMD    0x02 @@ -595,7 +732,7 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,  			flash_error_table[i].reason);  } -static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUGO, +static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,  	pm8001_show_update_fw, pm8001_store_update_fw);  struct device_attribute *pm8001_host_attrs[] = {  	&dev_attr_interface_rev, @@ -603,12 +740,17 @@ struct device_attribute *pm8001_host_attrs[] = {  	&dev_attr_update_fw,  	&dev_attr_aap_log,  	&dev_attr_iop_log, +	&dev_attr_fatal_log, +	&dev_attr_gsm_log,  	&dev_attr_max_out_io,  	&dev_attr_max_devices,  	&dev_attr_max_sg_list,  	&dev_attr_sas_spec_support,  	&dev_attr_logging_level,  	&dev_attr_host_sas_address, +	&dev_attr_bios_version, +	&dev_attr_ib_log, +	&dev_attr_ob_log,  	NULL,  }; diff --git a/drivers/scsi/pm8001/pm8001_ctl.h b/drivers/scsi/pm8001/pm8001_ctl.h index 63ad4aa0c42..d0d43a250b9 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.h +++ b/drivers/scsi/pm8001/pm8001_ctl.h @@ -45,6 +45,8 @@  #define HEADER_LEN			28  #define SIZE_OFFSET			16 +#define BIOSOFFSET			56 +#define BIOS_OFFSET_LIMIT		61  #define FLASH_OK                        0x000000  #define FAIL_OPEN_BIOS_FILE             0x000100 @@ -53,5 +55,9 @@  #define FAIL_OUT_MEMORY                 0x000c00  #define FLASH_IN_PROGRESS               0x001000 +#define IB_OB_READ_TIMES                256 +#define SYSFS_OFFSET                    1024 +#define PM80XX_IB_OB_QUEUE_SIZE         (32 * 1024) +#define PM8001_IB_OB_QUEUE_SIZE         (16 * 1024)  #endif /* PM8001_CTL_H_INCLUDED */ diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h index 479c5a7a863..74a4bb9af07 100644 --- a/drivers/scsi/pm8001/pm8001_defs.h +++ b/drivers/scsi/pm8001/pm8001_defs.h @@ -46,7 +46,10 @@ enum chip_flavors {  	chip_8008,  	chip_8009,  	chip_8018, -	chip_8019 +	chip_8019, +	chip_8074, +	chip_8076, +	chip_8077  };  enum phy_speed { @@ -99,7 +102,8 @@ enum memory_region_num {  	NVMD,	    /* NVM device */  	DEV_MEM,    /* memory for devices */  	CCB_MEM,    /* memory for command control block */ -	FW_FLASH    /* memory for fw flash update */ +	FW_FLASH,    /* memory for fw flash update */ +	FORENSIC_MEM  /* memory for fw forensic data */  };  #define	PM8001_EVENT_LOG_SIZE	 (128 * 1024) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 4a219575219..a97be015e52 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -644,7 +644,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)  	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);  	/* 8081 controllers need BAR shift to access MPI space  	* as this is shared with BIOS data */ -	if (deviceid == 0x8081) { +	if (deviceid == 0x8081 || deviceid == 0x0042) {  		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {  			PM8001_FAIL_DBG(pm8001_ha,  				pm8001_printk("Shift Bar4 to 0x%x failed\n", @@ -673,7 +673,7 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)  	for (i = 0; i < PM8001_MAX_OUTB_NUM; i++)  		update_outbnd_queue_table(pm8001_ha, i);  	/* 8081 controller donot require these operations */ -	if (deviceid != 
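The chip-init and uninit paths now treat device id 0x0042 (the ATTO-branded SPC) exactly like 0x8081: both need the BAR4 shift to reach MPI space, and both skip the SSC and open-retry setup. A small predicate would keep the growing id list in one place (illustrative helper, not present in the driver):

    #include <stdbool.h>
    #include <stdio.h>

    /* SPC boards whose BAR4 is shared with BIOS data */
    static bool pm8001_needs_bar_shift(unsigned short deviceid)
    {
        return deviceid == 0x8081 || deviceid == 0x0042;
    }

    int main(void)
    {
        printf("%d %d\n", pm8001_needs_bar_shift(0x8081),
               pm8001_needs_bar_shift(0x8008));
        return 0;
    }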
0x8081) { +	if (deviceid != 0x8081 && deviceid != 0x0042) {  		mpi_set_phys_g3_with_ssc(pm8001_ha, 0);  		/* 7->130ms, 34->500ms, 119->1.5s */  		mpi_set_open_retry_interval_reg(pm8001_ha, 119); @@ -701,7 +701,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)  	u32 gst_len_mpistate;  	u16 deviceid;  	pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); -	if (deviceid == 0x8081) { +	if (deviceid == 0x8081 || deviceid == 0x0042) {  		if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) {  			PM8001_FAIL_DBG(pm8001_ha,  				pm8001_printk("Shift Bar4 to 0x%x failed\n", @@ -1868,6 +1868,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)  	if (unlikely(!t || !t->lldd_task || !t->dev))  		return;  	ts = &t->task_status; +	/* Print sas address of IO failed device */ +	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && +		(status != IO_UNDERFLOW)) +		PM8001_FAIL_DBG(pm8001_ha, +			pm8001_printk("SAS Address of IO Failure Drive:" +			"%016llx", SAS_ADDR(t->dev->sas_addr))); +  	switch (status) {  	case IO_SUCCESS:  		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" @@ -2276,6 +2283,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  	u32 param;  	u32 status;  	u32 tag; +	int i, j; +	u8 sata_addr_low[4]; +	u32 temp_sata_addr_low; +	u8 sata_addr_hi[4]; +	u32 temp_sata_addr_hi;  	struct sata_completion_resp *psataPayload;  	struct task_status_struct *ts;  	struct ata_task_resp *resp ; @@ -2325,7 +2337,46 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  			pm8001_printk("ts null\n"));  		return;  	} - +	/* Print sas address of IO failed device */ +	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && +		(status != IO_UNDERFLOW)) { +		if (!((t->dev->parent) && +			(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { +			for (i = 0 , j = 4; j <= 7 && i <= 3; i++ , j++) +				sata_addr_low[i] = pm8001_ha->sas_addr[j]; +			for (i = 0 , j = 0; j <= 3 && i <= 3; i++ , j++) +				sata_addr_hi[i] = pm8001_ha->sas_addr[j]; +			memcpy(&temp_sata_addr_low, sata_addr_low, +				sizeof(sata_addr_low)); +			memcpy(&temp_sata_addr_hi, sata_addr_hi, +				sizeof(sata_addr_hi)); +			temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) +						|((temp_sata_addr_hi << 8) & +						0xff0000) | +						((temp_sata_addr_hi >> 8) +						& 0xff00) | +						((temp_sata_addr_hi << 24) & +						0xff000000)); +			temp_sata_addr_low = ((((temp_sata_addr_low >> 24) +						& 0xff) | +						((temp_sata_addr_low << 8) +						& 0xff0000) | +						((temp_sata_addr_low >> 8) +						& 0xff00) | +						((temp_sata_addr_low << 24) +						& 0xff000000)) + +						pm8001_dev->attached_phy + +						0x10); +			PM8001_FAIL_DBG(pm8001_ha, +				pm8001_printk("SAS Address of IO Failure Drive:" +				"%08x%08x", temp_sata_addr_hi, +					temp_sata_addr_low)); +		} else { +			PM8001_FAIL_DBG(pm8001_ha, +				pm8001_printk("SAS Address of IO Failure Drive:" +				"%016llx", SAS_ADDR(t->dev->sas_addr))); +		} +	}  	switch (status) {  	case IO_SUCCESS:  		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); @@ -2451,11 +2502,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*in order to force CPU ordering*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return; 
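The failure-path logging added above rebuilds the attached SATA drive's SAS address from the HBA address bytes, and the long shift-and-mask expressions are open-coded 32-bit byte reversals on each half of the address. They reduce to a plain byte swap:

    #include <stdio.h>
    #include <stdint.h>

    /* identical term-for-term to the masks in the hunk above */
    static uint32_t swap32(uint32_t v)
    {
        return ((v >> 24) & 0xff)     | ((v << 8) & 0xff0000) |
               ((v >> 8)  & 0xff00)   | ((v << 24) & 0xff000000);
    }

    int main(void)
    {
        uint32_t hi = 0x50010c60;  /* illustrative SAS address half */
        printf("0x%08x -> 0x%08x\n", hi, swap32(hi));
        return 0;
    }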
 		}  		break; @@ -2471,11 +2518,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2499,11 +2542,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/* ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2566,11 +2605,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				    IO_DS_NON_OPERATIONAL);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2590,11 +2625,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				    IO_DS_IN_ERROR);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2623,20 +2654,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  			" resp 0x%x stat 0x%x but aborted by upper layer!\n",  			t, status, ts->resp, ts->stat));  		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -	} else if (t->uldd_task) { -		spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/* ditto */ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); -	} else if (!t->uldd_task) { +	} else {  		spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/*ditto*/ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); +		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  	}  } @@ -2745,11 +2765,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)  				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);  			ts->resp = SAS_TASK_COMPLETE;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2858,20 +2874,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)  			" resp 0x%x stat 0x%x but aborted by upper layer!\n",  			t, event, ts->resp, ts->stat));  		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -	} else if (t->uldd_task) { -		spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/* ditto */ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); -	} else if (!t->uldd_task) 
{ +	} else {  		spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/*ditto*/ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); +		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  	}  } @@ -3087,8 +3092,8 @@ void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,  	struct pm8001_device *pm8001_dev = ccb->device;  	u32 status = le32_to_cpu(pPayload->status);  	u32 device_id = le32_to_cpu(pPayload->device_id); -	u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS; -	u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS; +	u8 pds = le32_to_cpu(pPayload->pds_nds) & PDS_BITS; +	u8 nds = le32_to_cpu(pPayload->pds_nds) & NDS_BITS;  	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "  		"from 0x%x to 0x%x status = 0x%x!\n",  		device_id, pds, nds, status)); @@ -3352,6 +3357,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)  	unsigned long flags;  	u8 deviceType = pPayload->sas_identify.dev_type;  	port->port_state =  portstate; +	phy->phy_state = PHY_STATE_LINK_UP_SPC;  	PM8001_MSG_DBG(pm8001_ha,  		pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",  		port_id, phy_id)); @@ -3432,6 +3438,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)  		pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"  		" phy id = %d\n", port_id, phy_id));  	port->port_state =  portstate; +	phy->phy_state = PHY_STATE_LINK_UP_SPC;  	port->port_attached = 1;  	pm8001_get_lrate_mode(phy, link_rate);  	phy->phy_type |= PORT_TYPE_SATA; @@ -4414,23 +4421,11 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,  					" stat 0x%x but aborted by upper layer "  					"\n", task, ts->resp, ts->stat));  				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); -			} else if (task->uldd_task) { -				spin_unlock_irqrestore(&task->task_state_lock, -							flags); -				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); -				mb();/* ditto */ -				spin_unlock_irq(&pm8001_ha->lock); -				task->task_done(task); -				spin_lock_irq(&pm8001_ha->lock); -				return 0; -			} else if (!task->uldd_task) { +			} else {  				spin_unlock_irqrestore(&task->task_state_lock,  							flags); -				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); -				mb();/*ditto*/ -				spin_unlock_irq(&pm8001_ha->lock); -				task->task_done(task); -				spin_lock_irq(&pm8001_ha->lock); +				pm8001_ccb_task_free_done(pm8001_ha, task, +								ccb, tag);  				return 0;  			}  		} @@ -4700,6 +4695,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha,  	sspTMCmd.tmf = cpu_to_le32(tmf->tmf);  	memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8);  	sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); +	if (pm8001_ha->chip_id != chip_8001) +		sspTMCmd.ds_ads_m = 0x08;  	circularQ = &pm8001_ha->inbnd_q_tbl[0];  	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0);  	return ret; @@ -4778,6 +4775,16 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha,  		    cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo);  		break;  	} +	case IOP_RDUMP: { +		nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | IOP_RDUMP); +		nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); +		nvmd_req.vpd_offset = cpu_to_le32(ioctl_payload->offset); +		nvmd_req.resp_addr_hi = +		cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); +		nvmd_req.resp_addr_lo = +		cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); +		break; +	}  	default:  		break;  	} @@ -4938,6 +4945,84 
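Every deleted block above repeated the same five statements, and the series collapses them into pm8001_ccb_task_free_done(). Reconstructing the helper from the call sites it replaces (a sketch inferred from the deleted code, not quoted from the driver):

    /* free the ccb, then drop the HBA lock around the completion
     * callback, exactly as each open-coded site used to */
    static void pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha,
                                          struct sas_task *task,
                                          struct pm8001_ccb_info *ccb,
                                          u32 ccb_idx)
    {
        pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx);
        mb();  /* force CPU ordering, as the removed sites did */
        spin_unlock_irq(&pm8001_ha->lock);
        task->task_done(task);
        spin_lock_irq(&pm8001_ha->lock);
    }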
@@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,  	return rc;  } +ssize_t +pm8001_get_gsm_dump(struct device *cdev, u32 length, char *buf) +{ +	u32 value, rem, offset = 0, bar = 0; +	u32 index, work_offset, dw_length; +	u32 shift_value, gsm_base, gsm_dump_offset; +	char *direct_data; +	struct Scsi_Host *shost = class_to_shost(cdev); +	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); +	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + +	direct_data = buf; +	gsm_dump_offset = pm8001_ha->fatal_forensic_shift_offset; + +	/* check max is 1 Mbytes */ +	if ((length > 0x100000) || (gsm_dump_offset & 3) || +		((gsm_dump_offset + length) > 0x1000000)) +			return -EINVAL; + +	if (pm8001_ha->chip_id == chip_8001) +		bar = 2; +	else +		bar = 1; + +	work_offset = gsm_dump_offset & 0xFFFF0000; +	offset = gsm_dump_offset & 0x0000FFFF; +	gsm_dump_offset = work_offset; +	/* adjust length to dword boundary */ +	rem = length & 3; +	dw_length = length >> 2; + +	for (index = 0; index < dw_length; index++) { +		if ((work_offset + offset) & 0xFFFF0000) { +			if (pm8001_ha->chip_id == chip_8001) +				shift_value = ((gsm_dump_offset + offset) & +						SHIFT_REG_64K_MASK); +			else +				shift_value = (((gsm_dump_offset + offset) & +						SHIFT_REG_64K_MASK) >> +						SHIFT_REG_BIT_SHIFT); + +			if (pm8001_ha->chip_id == chip_8001) { +				gsm_base = GSM_BASE; +				if (-1 == pm8001_bar4_shift(pm8001_ha, +						(gsm_base + shift_value))) +					return -EIO; +			} else { +				gsm_base = 0; +				if (-1 == pm80xx_bar4_shift(pm8001_ha, +						(gsm_base + shift_value))) +					return -EIO; +			} +			gsm_dump_offset = (gsm_dump_offset + offset) & +						0xFFFF0000; +			work_offset = 0; +			offset = offset & 0x0000FFFF; +		} +		value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & +						0x0000FFFF); +		direct_data += sprintf(direct_data, "%08x ", value); +		offset += 4; +	} +	if (rem != 0) { +		value = pm8001_cr32(pm8001_ha, bar, (work_offset + offset) & +						0x0000FFFF); +		/* xfr for non_dw */ +		direct_data += sprintf(direct_data, "%08x ", value); +	} +	/* Shift back to BAR4 original address */ +	if (-1 == pm8001_bar4_shift(pm8001_ha, 0)) +			return -EIO; +	pm8001_ha->fatal_forensic_shift_offset += 1024; + +	if (pm8001_ha->fatal_forensic_shift_offset >= 0x100000) +		pm8001_ha->fatal_forensic_shift_offset = 0; +	return direct_data - buf; +} +  int  pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha,  	struct pm8001_device *pm8001_dev, u32 state) diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h index d7c1e203422..e4867e690c8 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.h +++ b/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,10 @@  #define LINKRATE_30			(0x02 << 8)  #define LINKRATE_60			(0x04 << 8) +/* for phy state */ + +#define PHY_STATE_LINK_UP_SPC		0x1 +  /* for new SPC controllers MEMBASE III is shared between BIOS and DATA */  #define GSM_SM_BASE			0x4F0000  struct mpi_msg_hdr{ @@ -1027,5 +1031,8 @@ struct set_dev_state_resp {  #define DEVREG_FAILURE_PORT_NOT_VALID_STATE		0x06  #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID		0x07 +#define GSM_BASE					0x4F0000 +#define SHIFT_REG_64K_MASK				0xffff0000 +#define SHIFT_REG_BIT_SHIFT				8  #endif diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index f7c189606b8..e90c89f1d48 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -54,6 +54,9 @@ static const struct pm8001_chip_info pm8001_chips[] = {  	[chip_8009] = {1,  8, 
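pm8001_get_gsm_dump() above can only see 64 KB of GSM space at a time through BAR4, so it splits the requested offset into a window (high 16 bits, programmed through the shift register) and an in-window offset (low 16 bits), re-shifting whenever a read crosses a 64 KB boundary. The windowing logic in isolation:

    #include <stdio.h>

    #define WINDOW_MASK 0xFFFF0000u

    static unsigned int current_window = ~0u;  /* no window mapped yet */

    /* simplified: the real code programs the shift via
     * pm8001_bar4_shift()/pm80xx_bar4_shift() */
    static void access_offset(unsigned int off)
    {
        if ((off & WINDOW_MASK) != current_window) {
            current_window = off & WINDOW_MASK;
            printf("shift window to 0x%08x\n", current_window);
        }
        printf("read at window offset 0x%04x\n", off & 0xFFFF);
    }

    int main(void)
    {
        access_offset(0x0000FFFC);
        access_offset(0x00010000);  /* crosses 64 KB: re-shift */
        return 0;
    }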
&pm8001_80xx_dispatch,},  	[chip_8018] = {0,  16, &pm8001_80xx_dispatch,},  	[chip_8019] = {1,  16, &pm8001_80xx_dispatch,}, +	[chip_8074] = {0,  8, &pm8001_80xx_dispatch,}, +	[chip_8076] = {0,  16, &pm8001_80xx_dispatch,}, +	[chip_8077] = {0,  16, &pm8001_80xx_dispatch,},  };  static int pm8001_id; @@ -172,20 +175,16 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)  static void pm8001_tasklet(unsigned long opaque)  {  	struct pm8001_hba_info *pm8001_ha; -	u32 vec; -	pm8001_ha = (struct pm8001_hba_info *)opaque; +	struct isr_param *irq_vector; + +	irq_vector = (struct isr_param *)opaque; +	pm8001_ha = irq_vector->drv_inst;  	if (unlikely(!pm8001_ha))  		BUG_ON(1); -	vec = pm8001_ha->int_vector; -	PM8001_CHIP_DISP->isr(pm8001_ha, vec); +	PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);  }  #endif -static struct  pm8001_hba_info *outq_to_hba(u8 *outq) -{ -	return container_of((outq - *outq), struct pm8001_hba_info, outq[0]); -} -  /**   * pm8001_interrupt_handler_msix - main MSIX interrupt handler.   * It obtains the vector number and calls the equivalent bottom @@ -195,18 +194,20 @@ static struct  pm8001_hba_info *outq_to_hba(u8 *outq)   */  static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque)  { -	struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); -	u8 outq = *(u8 *)opaque; +	struct isr_param *irq_vector; +	struct pm8001_hba_info *pm8001_ha;  	irqreturn_t ret = IRQ_HANDLED; +	irq_vector = (struct isr_param *)opaque; +	pm8001_ha = irq_vector->drv_inst; +  	if (unlikely(!pm8001_ha))  		return IRQ_NONE;  	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))  		return IRQ_NONE; -	pm8001_ha->int_vector = outq;  #ifdef PM8001_USE_TASKLET -	tasklet_schedule(&pm8001_ha->tasklet); +	tasklet_schedule(&pm8001_ha->tasklet[irq_vector->irq_id]);  #else -	ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); +	ret = PM8001_CHIP_DISP->isr(pm8001_ha, irq_vector->irq_id);  #endif  	return ret;  } @@ -227,9 +228,8 @@ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id)  	if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha))  		return IRQ_NONE; -	pm8001_ha->int_vector = 0;  #ifdef PM8001_USE_TASKLET -	tasklet_schedule(&pm8001_ha->tasklet); +	tasklet_schedule(&pm8001_ha->tasklet[0]);  #else  	ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0);  #endif @@ -344,6 +344,10 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha,  	/* Memory region for fw flash */  	pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; +	pm8001_ha->memoryMap.region[FORENSIC_MEM].num_elements = 1; +	pm8001_ha->memoryMap.region[FORENSIC_MEM].total_len = 0x10000; +	pm8001_ha->memoryMap.region[FORENSIC_MEM].element_size = 0x10000; +	pm8001_ha->memoryMap.region[FORENSIC_MEM].alignment = 0x10000;  	for (i = 0; i < USI_MAX_MEMCNT; i++) {  		if (pm8001_mem_alloc(pm8001_ha->pdev,  			&pm8001_ha->memoryMap.region[i].virt_ptr, @@ -450,7 +454,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,  {  	struct pm8001_hba_info *pm8001_ha;  	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); - +	int j;  	pm8001_ha = sha->lldd_ha;  	if (!pm8001_ha) @@ -473,12 +477,14 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,  		pm8001_ha->iomb_size = IOMB_SIZE_SPC;  #ifdef PM8001_USE_TASKLET -	/** -	* default tasklet for non msi-x interrupt handler/first msi-x -	* interrupt handler -	**/ -	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, -			(unsigned long)pm8001_ha); +	/* Tasklet for non msi-x interrupt handler */ +	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == 
chip_8001)) +		tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, +			(unsigned long)&(pm8001_ha->irq_vector[0])); +	else +		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) +			tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, +				(unsigned long)&(pm8001_ha->irq_vector[j]));  #endif  	pm8001_ioremap(pm8001_ha);  	if (!pm8001_alloc(pm8001_ha, ent)) @@ -619,7 +625,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)  	pm8001_ha->nvmd_completion = &completion;  	if (pm8001_ha->chip_id == chip_8001) { -		if (deviceid == 0x8081) { +		if (deviceid == 0x8081 || deviceid == 0x0042) {  			payload.minor_function = 4;  			payload.length = 4096;  		} else { @@ -640,6 +646,9 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)  			if (deviceid == 0x8081)  				pm8001_ha->sas_addr[j] =  					payload.func_specific[0x704 + i]; +			else if (deviceid == 0x0042) +				pm8001_ha->sas_addr[j] = +					payload.func_specific[0x010 + i];  		} else  			pm8001_ha->sas_addr[j] =  					payload.func_specific[0x804 + i]; @@ -664,6 +673,35 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)  #endif  } +/* + * pm8001_get_phy_settings_info : Read phy setting values. + * @pm8001_ha : our hba. + */ +static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) +{ + +#ifdef PM8001_READ_VPD +	/*OPTION ROM FLASH read for the SPC cards */ +	DECLARE_COMPLETION_ONSTACK(completion); +	struct pm8001_ioctl_payload payload; + +	pm8001_ha->nvmd_completion = &completion; +	/* SAS ADDRESS read from flash / EEPROM */ +	payload.minor_function = 6; +	payload.offset = 0; +	payload.length = 4096; +	payload.func_specific = kzalloc(4096, GFP_KERNEL); +	if (!payload.func_specific) +		return -ENOMEM; +	/* Read phy setting values from flash */ +	PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); +	wait_for_completion(&completion); +	pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); +	kfree(payload.func_specific); +#endif +	return 0; +} +  #ifdef PM8001_USE_MSIX  /**   * pm8001_setup_msix - enable MSI-X interrupt @@ -682,11 +720,9 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)  	/* SPCv controllers supports 64 msi-x */  	if (pm8001_ha->chip_id == chip_8001) {  		number_of_intr = 1; -		flag |= IRQF_DISABLED;  	} else {  		number_of_intr = PM8001_MAX_MSIX_VEC;  		flag &= ~IRQF_SHARED; -		flag |= IRQF_DISABLED;  	}  	max_entry = sizeof(pm8001_ha->msix_entries) / @@ -701,19 +737,20 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)  			"pci_enable_msix request ret:%d no of intr %d\n",  					rc, pm8001_ha->number_of_intr)); -		for (i = 0; i < number_of_intr; i++) -			pm8001_ha->outq[i] = i;  		for (i = 0; i < number_of_intr; i++) {  			snprintf(intr_drvname[i], sizeof(intr_drvname[0]),  					DRV_NAME"%d", i); +			pm8001_ha->irq_vector[i].irq_id = i; +			pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; +  			if (request_irq(pm8001_ha->msix_entries[i].vector,  				pm8001_interrupt_handler_msix, flag, -				intr_drvname[i], &pm8001_ha->outq[i])) { +				intr_drvname[i], &(pm8001_ha->irq_vector[i]))) {  				for (j = 0; j < i; j++)  					free_irq(  					pm8001_ha->msix_entries[j].vector, -					&pm8001_ha->outq[j]); +					&(pm8001_ha->irq_vector[i]));  				pci_disable_msix(pm8001_ha->pdev);  				break;  			} @@ -844,6 +881,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev,  	}  	pm8001_init_sas_add(pm8001_ha); +	/* phy setting support for motherboard controller */ +	if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && +		
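pm8001_get_phy_settings_info() follows the driver's usual firmware-request shape, also visible in the bios_version attribute earlier: build a pm8001_ioctl_payload, fire get_nvmd_req(), and block on an on-stack completion that the response handler signals. A userspace analog of that fire-and-wait pattern, with a condition variable in place of the kernel's completion (compile with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static struct completion nvmd_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    /* stands in for the firmware's asynchronous NVMD response */
    static void *firmware_reply(void *arg)
    {
        (void)arg;
        complete(&nvmd_done);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, firmware_reply, NULL);
        wait_for_completion(&nvmd_done);  /* block until reply lands */
        pthread_join(t, NULL);
        printf("NVMD payload ready\n");
        return 0;
    }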
pdev->subsystem_vendor != 0) { +		rc = pm8001_get_phy_settings_info(pm8001_ha); +		if (rc) +			goto err_out_shost; +	}  	pm8001_post_sas_ha_init(shost, chip);  	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));  	if (rc) @@ -871,9 +915,8 @@ static void pm8001_pci_remove(struct pci_dev *pdev)  {  	struct sas_ha_struct *sha = pci_get_drvdata(pdev);  	struct pm8001_hba_info *pm8001_ha; -	int i; +	int i, j;  	pm8001_ha = sha->lldd_ha; -	pci_set_drvdata(pdev, NULL);  	sas_unregister_ha(sha);  	sas_remove_host(pm8001_ha->shost);  	list_del(&pm8001_ha->list); @@ -886,13 +929,18 @@ static void pm8001_pci_remove(struct pci_dev *pdev)  		synchronize_irq(pm8001_ha->msix_entries[i].vector);  	for (i = 0; i < pm8001_ha->number_of_intr; i++)  		free_irq(pm8001_ha->msix_entries[i].vector, -				&pm8001_ha->outq[i]); +				&(pm8001_ha->irq_vector[i]));  	pci_disable_msix(pdev);  #else  	free_irq(pm8001_ha->irq, sha);  #endif  #ifdef PM8001_USE_TASKLET -	tasklet_kill(&pm8001_ha->tasklet); +	/* For non-msix and msix interrupts */ +	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) +		tasklet_kill(&pm8001_ha->tasklet[0]); +	else +		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) +			tasklet_kill(&pm8001_ha->tasklet[j]);  #endif  	pm8001_free(pm8001_ha);  	kfree(sha->sas_phy); @@ -913,7 +961,7 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)  {  	struct sas_ha_struct *sha = pci_get_drvdata(pdev);  	struct pm8001_hba_info *pm8001_ha; -	int i; +	int  i, j;  	u32 device_state;  	pm8001_ha = sha->lldd_ha;  	flush_workqueue(pm8001_wq); @@ -929,13 +977,18 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state)  		synchronize_irq(pm8001_ha->msix_entries[i].vector);  	for (i = 0; i < pm8001_ha->number_of_intr; i++)  		free_irq(pm8001_ha->msix_entries[i].vector, -				&pm8001_ha->outq[i]); +				&(pm8001_ha->irq_vector[i]));  	pci_disable_msix(pdev);  #else  	free_irq(pm8001_ha->irq, sha);  #endif  #ifdef PM8001_USE_TASKLET -	tasklet_kill(&pm8001_ha->tasklet); +	/* For non-msix and msix interrupts */ +	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) +		tasklet_kill(&pm8001_ha->tasklet[0]); +	else +		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) +			tasklet_kill(&pm8001_ha->tasklet[j]);  #endif  	device_state = pci_choose_state(pdev, state);  	pm8001_printk("pdev=0x%p, slot=%s, entering " @@ -958,7 +1011,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev)  	struct sas_ha_struct *sha = pci_get_drvdata(pdev);  	struct pm8001_hba_info *pm8001_ha;  	int rc; -	u8 i = 0; +	u8 i = 0, j;  	u32 device_state;  	pm8001_ha = sha->lldd_ha;  	device_state = pdev->current_state; @@ -998,10 +1051,14 @@ static int pm8001_pci_resume(struct pci_dev *pdev)  	if (rc)  		goto err_out_disable;  #ifdef PM8001_USE_TASKLET -	/* default tasklet for non msi-x interrupt handler/first msi-x -	* interrupt handler */ -	tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, -			(unsigned long)pm8001_ha); +	/*  Tasklet for non msi-x interrupt handler */ +	if ((!pdev->msix_cap) || (pm8001_ha->chip_id == chip_8001)) +		tasklet_init(&pm8001_ha->tasklet[0], pm8001_tasklet, +			(unsigned long)&(pm8001_ha->irq_vector[0])); +	else +		for (j = 0; j < PM8001_MAX_MSIX_VEC; j++) +			tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet, +				(unsigned long)&(pm8001_ha->irq_vector[j]));  #endif  	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);  	if (pm8001_ha->chip_id != chip_8001) { @@ -1023,10 +1080,7 @@ err_out_enable:   */  static struct pci_device_id pm8001_pci_table[] = {  	{ PCI_VDEVICE(PMC_Sierra, 0x8001), 
chip_8001 }, -	{ -		PCI_DEVICE(0x117c, 0x0042), -		.driver_data = chip_8001 -	}, +	{ PCI_VDEVICE(ATTO, 0x0042), chip_8001 },  	/* Support for SPC/SPCv/SPCve controllers */  	{ PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },  	{ PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, @@ -1037,6 +1091,12 @@ static struct pci_device_id pm8001_pci_table[] = {  	{ PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 },  	{ PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 },  	{ PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, +	{ PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 }, +	{ PCI_VDEVICE(ADAPTEC2, 0x8074), chip_8074 }, +	{ PCI_VDEVICE(PMC_Sierra, 0x8076), chip_8076 }, +	{ PCI_VDEVICE(ADAPTEC2, 0x8076), chip_8076 }, +	{ PCI_VDEVICE(PMC_Sierra, 0x8077), chip_8077 }, +	{ PCI_VDEVICE(ADAPTEC2, 0x8077), chip_8077 },  	{ PCI_VENDOR_ID_ADAPTEC2, 0x8081,  		PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 },  	{ PCI_VENDOR_ID_ADAPTEC2, 0x8081, @@ -1057,6 +1117,24 @@ static struct pci_device_id pm8001_pci_table[] = {  		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 },  	{ PCI_VENDOR_ID_ADAPTEC2, 0x8089,  		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8074, +		PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8074 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8076, +		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8076 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8077, +		PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8077 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8074, +		PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8074 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8076, +		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8076 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8077, +		PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8077 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8076, +		PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8076 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8077, +		PCI_VENDOR_ID_ADAPTEC2, 0x0808, 0, 0, chip_8077 }, +	{ PCI_VENDOR_ID_ADAPTEC2, 0x8074, +		PCI_VENDOR_ID_ADAPTEC2, 0x0404, 0, 0, chip_8074 },  	{} /* terminate list */  }; @@ -1108,8 +1186,12 @@ module_init(pm8001_init);  module_exit(pm8001_exit);  MODULE_AUTHOR("Jack Wang <jack_wang@usish.com>"); +MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>"); +MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); +MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");  MODULE_DESCRIPTION( -		"PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); +		"PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 " +		"SAS/SATA controller driver");  MODULE_VERSION(DRV_VERSION);  MODULE_LICENSE("GPL");  MODULE_DEVICE_TABLE(pci, pm8001_pci_table); diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index a85d73de7c8..8a44bc92bc7 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -434,6 +434,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,  		ccb->n_elem = n_elem;  		ccb->ccb_tag = tag;  		ccb->task = t; +		ccb->device = pm8001_dev;  		switch (t->task_proto) {  		case SAS_PROTOCOL_SMP:  			rc = pm8001_task_prep_smp(pm8001_ha, ccb); @@ -447,7 +448,6 @@ static int pm8001_task_exec(struct sas_task *task, const int num,  			break;  		case SAS_PROTOCOL_SATA:  		case SAS_PROTOCOL_STP: -		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:  			rc = pm8001_task_prep_ata(pm8001_ha, ccb);  			break;  		default: @@ -704,6 +704,8 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,  	int res, retry;  	struct sas_task *task = NULL;  	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); +	struct 
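In the device table above, PCI_VDEVICE() entries wildcard the subsystem ids while the long-form entries bind specific board SKUs to a chip generation. A rough illustration of the expansion (shown only for comparison, not part of the patch):

	{ PCI_VDEVICE(PMC_Sierra, 0x8074), chip_8074 },
	/* is approximately equivalent to: */
	{ PCI_VENDOR_ID_PMC_Sierra, 0x8074,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, chip_8074 },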
pm8001_device *pm8001_dev = dev->lldd_dev; +	DECLARE_COMPLETION_ONSTACK(completion_setstate);  	for (retry = 0; retry < 3; retry++) {  		task = sas_alloc_slow_task(GFP_KERNEL); @@ -729,6 +731,12 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,  			goto ex_err;  		}  		wait_for_completion(&task->slow_task->completion); +		if (pm8001_ha->chip_id != chip_8001) { +			pm8001_dev->setds_completion = &completion_setstate; +				PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, +					pm8001_dev, 0x01); +			wait_for_completion(&completion_setstate); +		}  		res = -TMF_RESP_FUNC_FAILED;  		/* Even TMF timed out, return direct. */  		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { @@ -858,13 +866,11 @@ ex_err:  static void pm8001_dev_gone_notify(struct domain_device *dev)  {  	unsigned long flags = 0; -	u32 tag;  	struct pm8001_hba_info *pm8001_ha;  	struct pm8001_device *pm8001_dev = dev->lldd_dev;  	pm8001_ha = pm8001_find_ha_by_dev(dev);  	spin_lock_irqsave(&pm8001_ha->lock, flags); -	pm8001_tag_alloc(pm8001_ha, &tag);  	if (pm8001_dev) {  		u32 device_id = pm8001_dev->device_id; @@ -1091,15 +1097,17 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)  	struct pm8001_tmf_task tmf_task;  	struct pm8001_device *pm8001_dev = dev->lldd_dev;  	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); +	DECLARE_COMPLETION_ONSTACK(completion_setstate);  	if (dev_is_sata(dev)) {  		struct sas_phy *phy = sas_get_local_phy(dev);  		rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,  			dev, 1, 0);  		rc = sas_phy_reset(phy, 1);  		sas_put_local_phy(phy); +		pm8001_dev->setds_completion = &completion_setstate;  		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,  			pm8001_dev, 0x01); -		msleep(2000); +		wait_for_completion(&completion_setstate);  	} else {  		tmf_task.tmf = TMF_LU_RESET;  		rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 570819464d9..1ee06f21803 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -104,6 +104,9 @@ do {						\  #define DEV_IS_EXPANDER(type)	((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) +#define IS_SPCV_12G(dev)	((dev->device == 0X8074)		\ +				|| (dev->device == 0X8076)		\ +				|| (dev->device == 0X8077))  #define PM8001_NAME_LENGTH		32/* generic length of strings */  extern struct list_head hba_list; @@ -129,6 +132,61 @@ struct pm8001_ioctl_payload {  	u8	*func_specific;  }; +#define MPI_FATAL_ERROR_TABLE_OFFSET_MASK 0xFFFFFF +#define MPI_FATAL_ERROR_TABLE_SIZE(value) ((0xFF000000 & value) >> SHIFT24) +#define MPI_FATAL_EDUMP_TABLE_LO_OFFSET            0x00     /* HNFBUFL */ +#define MPI_FATAL_EDUMP_TABLE_HI_OFFSET            0x04     /* HNFBUFH */ +#define MPI_FATAL_EDUMP_TABLE_LENGTH               0x08     /* HNFBLEN */ +#define MPI_FATAL_EDUMP_TABLE_HANDSHAKE            0x0C     /* FDDHSHK */ +#define MPI_FATAL_EDUMP_TABLE_STATUS               0x10     /* FDDTSTAT */ +#define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN            0x14     /* ACCDDLEN */ +#define MPI_FATAL_EDUMP_HANDSHAKE_RDY              0x1 +#define MPI_FATAL_EDUMP_HANDSHAKE_BUSY             0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_RSVD                 0x0 +#define MPI_FATAL_EDUMP_TABLE_STAT_DMA_FAILED           0x1 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_MORE_DATA 0x2 +#define MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE      0x3 +#define TYPE_GSM_SPACE        1 +#define TYPE_QUEUE            2 +#define 
TYPE_FATAL            3 +#define TYPE_NON_FATAL        4 +#define TYPE_INBOUND          1 +#define TYPE_OUTBOUND         2 +struct forensic_data { +	u32  data_type; +	union { +		struct { +			u32  direct_len; +			u32  direct_offset; +			void  *direct_data; +		} gsm_buf; +		struct { +			u16  queue_type; +			u16  queue_index; +			u32  direct_len; +			void  *direct_data; +		} queue_buf; +		struct { +			u32  direct_len; +			u32  direct_offset; +			u32  read_len; +			void  *direct_data; +		} data_buf; +	}; +}; + +/* bit31-26 - mask bar */ +#define SCRATCH_PAD0_BAR_MASK                    0xFC000000 +/* bit25-0  - offset mask */ +#define SCRATCH_PAD0_OFFSET_MASK                 0x03FFFFFF +/* if AAP error state */ +#define SCRATCH_PAD0_AAPERR_MASK                 0xFFFFFFFF +/* Inbound doorbell bit7 */ +#define SPCv_MSGU_CFG_TABLE_NONFATAL_DUMP	 0x80 +/* Inbound doorbell bit7 SPCV */ +#define SPCV_MSGU_CFG_TABLE_TRANSFER_DEBUG_INFO  0x80 +#define MAIN_MERRDCTO_MERRDCES		         0xA0/* DWORD 0x28) */ +  struct pm8001_dispatch {  	char *name;  	int (*chip_init)(struct pm8001_hba_info *pm8001_ha); @@ -343,6 +401,7 @@ union main_cfg_table {  	u32			phy_attr_table_offset;  	u32			port_recovery_timer;  	u32			interrupt_reassertion_delay; +	u32			fatal_n_non_fatal_dump;	        /* 0x28 */  	} pm80xx_tbl;  }; @@ -407,6 +466,10 @@ struct pm8001_hba_memspace {  	u64			membase;  	u32			memsize;  }; +struct isr_param { +	struct pm8001_hba_info *drv_inst; +	u32 irq_id; +};  struct pm8001_hba_info {  	char			name[PM8001_NAME_LENGTH];  	struct list_head	list; @@ -417,6 +480,13 @@ struct pm8001_hba_info {  	struct pm8001_hba_memspace io_mem[6];  	struct mpi_mem_req	memoryMap;  	struct encrypt		encrypt_info; /* support encryption */ +	struct forensic_data	forensic_info; +	u32			fatal_bar_loc; +	u32			forensic_last_offset; +	u32			fatal_forensic_shift_offset; +	u32			forensic_fatal_step; +	u32			evtlog_ib_offset; +	u32			evtlog_ob_offset;  	void __iomem	*msg_unit_tbl_addr;/*Message Unit Table Addr*/  	void __iomem	*main_cfg_tbl_addr;/*Main Config Table Addr*/  	void __iomem	*general_stat_tbl_addr;/*General Status Table Addr*/ @@ -425,6 +495,7 @@ struct pm8001_hba_info {  	void __iomem	*pspa_q_tbl_addr;  			/*MPI SAS PHY attributes Queue Config Table Addr*/  	void __iomem	*ivt_tbl_addr; /*MPI IVT Table Addr */ +	void __iomem	*fatal_tbl_addr; /*MPI IVT Table Addr */  	union main_cfg_table	main_cfg_tbl;  	union general_status_table	gs_tbl;  	struct inbound_queue_table	inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; @@ -452,14 +523,13 @@ struct pm8001_hba_info {  	int			number_of_intr;/*will be used in remove()*/  #endif  #ifdef PM8001_USE_TASKLET -	struct tasklet_struct	tasklet; +	struct tasklet_struct	tasklet[PM8001_MAX_MSIX_VEC];  #endif  	u32			logging_level;  	u32			fw_status;  	u32			smp_exp_mode; -	u32			int_vector;  	const struct firmware 	*fw_image; -	u8			outq[PM8001_MAX_MSIX_VEC]; +	struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];  };  struct pm8001_work { @@ -629,9 +699,26 @@ struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,  int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha);  int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); - +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, +	u32 length, u8 *buf); +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue); +ssize_t pm80xx_get_fatal_dump(struct device *cdev, +		struct device_attribute *attr, char *buf); +ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);  /* 
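The handshake and status constants above drive the fatal-dump protocol implemented in pm80xx_hwi.c further down. A hypothetical helper (pm80xx_fatal_handshake() is not a function in this patch) folding the poll that pm80xx_get_fatal_dump() open-codes: write RDY, then wait up to two seconds for the firmware to drop the flag back to busy (0):

	/* hypothetical helper; mirrors the open-coded loop in
	 * pm80xx_get_fatal_dump() below */
	static int pm80xx_fatal_handshake(struct pm8001_hba_info *pm8001_ha)
	{
		void __iomem *tbl = pm8001_ha->fatal_tbl_addr;
		unsigned long deadline = jiffies + 2 * HZ;
		u32 val;

		pm8001_mw32(tbl, MPI_FATAL_EDUMP_TABLE_HANDSHAKE,
			MPI_FATAL_EDUMP_HANDSHAKE_RDY);
		do {
			val = pm8001_mr32(tbl, MPI_FATAL_EDUMP_TABLE_HANDSHAKE);
		} while (val && time_before(jiffies, deadline));

		return val ? -EIO : 0;
	}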
ctl shared API */  extern struct device_attribute *pm8001_host_attrs[]; +static inline void +pm8001_ccb_task_free_done(struct pm8001_hba_info *pm8001_ha, +			struct sas_task *task, struct pm8001_ccb_info *ccb, +			u32 ccb_idx) +{ +	pm8001_ccb_task_free(pm8001_ha, task, ccb, ccb_idx); +	smp_mb(); /*in order to force CPU ordering*/ +	spin_unlock(&pm8001_ha->lock); +	task->task_done(task); +	spin_lock(&pm8001_ha->lock); +} +  #endif diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 9f91030211e..d70587f9618 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -45,6 +45,221 @@  #define SMP_DIRECT 1  #define SMP_INDIRECT 2 + + +int pm80xx_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shift_value) +{ +	u32 reg_val; +	unsigned long start; +	pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, shift_value); +	/* confirm the setting is written */ +	start = jiffies + HZ; /* 1 sec */ +	do { +		reg_val = pm8001_cr32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER); +	} while ((reg_val != shift_value) && time_before(jiffies, start)); +	if (reg_val != shift_value) { +		PM8001_FAIL_DBG(pm8001_ha, +			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" +			" = 0x%x\n", reg_val)); +		return -1; +	} +	return 0; +} + +void pm80xx_pci_mem_copy(struct pm8001_hba_info  *pm8001_ha, u32 soffset, +				const void *destination, +				u32 dw_count, u32 bus_base_number) +{ +	u32 index, value, offset; +	u32 *destination1; +	destination1 = (u32 *)destination; + +	for (index = 0; index < dw_count; index += 4, destination1++) { +		offset = (soffset + index / 4); +		if (offset < (64 * 1024)) { +			value = pm8001_cr32(pm8001_ha, bus_base_number, offset); +			*destination1 =  cpu_to_le32(value); +		} +	} +	return; +} + +ssize_t pm80xx_get_fatal_dump(struct device *cdev, +	struct device_attribute *attr, char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(cdev); +	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); +	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; +	void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; +	u32 accum_len , reg_val, index, *temp; +	unsigned long start; +	u8 *direct_data; +	char *fatal_error_data = buf; + +	pm8001_ha->forensic_info.data_buf.direct_data = buf; +	if (pm8001_ha->chip_id == chip_8001) { +		pm8001_ha->forensic_info.data_buf.direct_data += +			sprintf(pm8001_ha->forensic_info.data_buf.direct_data, +			"Not supported for SPC controller"); +		return (char *)pm8001_ha->forensic_info.data_buf.direct_data - +			(char *)buf; +	} +	if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { +		PM8001_IO_DBG(pm8001_ha, +		pm8001_printk("forensic_info TYPE_NON_FATAL..............\n")); +		direct_data = (u8 *)fatal_error_data; +		pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; +		pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; +		pm8001_ha->forensic_info.data_buf.read_len = 0; + +		pm8001_ha->forensic_info.data_buf.direct_data = direct_data; + +		/* start to get data */ +		/* Program the MEMBASE II Shifting Register with 0x00.*/ +		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, +				pm8001_ha->fatal_forensic_shift_offset); +		pm8001_ha->forensic_last_offset = 0; +		pm8001_ha->forensic_fatal_step = 0; +		pm8001_ha->fatal_bar_loc = 0; +	} + +	/* Read until accum_len is retrived */ +	accum_len = pm8001_mr32(fatal_table_address, +				MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); +	PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n", +						accum_len)); +	if (accum_len == 0xFFFFFFFF) { +		
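pm80xx_bar4_shift() and pm80xx_pci_mem_copy() above are the primitives behind the dump reader: the controller exposes only a 64 KiB window through the BAR, and the MEMBASE II shift register selects which 64 KiB page of controller memory that window shows. A sketch of how a larger region is walked one page at a time (pm80xx_walk_region() is illustrative, not part of the patch):

	static int pm80xx_walk_region(struct pm8001_hba_info *pm8001_ha,
				u32 base_offset, u32 region_len)
	{
		u32 page;

		for (page = 0; page < region_len; page += 64 * 1024) {
			/* retarget the 64 KiB BAR window */
			if (pm80xx_bar4_shift(pm8001_ha, base_offset + page))
				return -EIO;
			/* ... copy up to 64 KiB out of the window here ... */
		}
		return 0;
	}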
PM8001_IO_DBG(pm8001_ha, +			pm8001_printk("Possible PCI issue 0x%x not expected\n", +				accum_len)); +		return -EIO; +	} +	if (accum_len == 0 || accum_len >= 0x100000) { +		pm8001_ha->forensic_info.data_buf.direct_data += +			sprintf(pm8001_ha->forensic_info.data_buf.direct_data, +				"%08x ", 0xFFFFFFFF); +		return (char *)pm8001_ha->forensic_info.data_buf.direct_data - +			(char *)buf; +	} +	temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; +	if (pm8001_ha->forensic_fatal_step == 0) { +moreData: +		if (pm8001_ha->forensic_info.data_buf.direct_data) { +			/* Data is in bar, copy to host memory */ +			pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc, +			 pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, +				pm8001_ha->forensic_info.data_buf.direct_len , +					1); +		} +		pm8001_ha->fatal_bar_loc += +			pm8001_ha->forensic_info.data_buf.direct_len; +		pm8001_ha->forensic_info.data_buf.direct_offset += +			pm8001_ha->forensic_info.data_buf.direct_len; +		pm8001_ha->forensic_last_offset	+= +			pm8001_ha->forensic_info.data_buf.direct_len; +		pm8001_ha->forensic_info.data_buf.read_len = +			pm8001_ha->forensic_info.data_buf.direct_len; + +		if (pm8001_ha->forensic_last_offset  >= accum_len) { +			pm8001_ha->forensic_info.data_buf.direct_data += +			sprintf(pm8001_ha->forensic_info.data_buf.direct_data, +				"%08x ", 3); +			for (index = 0; index < (SYSFS_OFFSET / 4); index++) { +				pm8001_ha->forensic_info.data_buf.direct_data += +					sprintf(pm8001_ha-> +					 forensic_info.data_buf.direct_data, +						"%08x ", *(temp + index)); +			} + +			pm8001_ha->fatal_bar_loc = 0; +			pm8001_ha->forensic_fatal_step = 1; +			pm8001_ha->fatal_forensic_shift_offset = 0; +			pm8001_ha->forensic_last_offset	= 0; +			return (char *)pm8001_ha-> +				forensic_info.data_buf.direct_data - +				(char *)buf; +		} +		if (pm8001_ha->fatal_bar_loc < (64 * 1024)) { +			pm8001_ha->forensic_info.data_buf.direct_data += +				sprintf(pm8001_ha-> +					forensic_info.data_buf.direct_data, +					"%08x ", 2); +			for (index = 0; index < (SYSFS_OFFSET / 4); index++) { +				pm8001_ha->forensic_info.data_buf.direct_data += +					sprintf(pm8001_ha-> +					forensic_info.data_buf.direct_data, +					"%08x ", *(temp + index)); +			} +			return (char *)pm8001_ha-> +				forensic_info.data_buf.direct_data - +				(char *)buf; +		} + +		/* Increment the MEMBASE II Shifting Register value by 0x100.*/ +		pm8001_ha->forensic_info.data_buf.direct_data += +			sprintf(pm8001_ha->forensic_info.data_buf.direct_data, +				"%08x ", 2); +		for (index = 0; index < 256; index++) { +			pm8001_ha->forensic_info.data_buf.direct_data += +				sprintf(pm8001_ha-> +					forensic_info.data_buf.direct_data, +						"%08x ", *(temp + index)); +		} +		pm8001_ha->fatal_forensic_shift_offset += 0x100; +		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, +			pm8001_ha->fatal_forensic_shift_offset); +		pm8001_ha->fatal_bar_loc = 0; +		return (char *)pm8001_ha->forensic_info.data_buf.direct_data - +			(char *)buf; +	} +	if (pm8001_ha->forensic_fatal_step == 1) { +		pm8001_ha->fatal_forensic_shift_offset = 0; +		/* Read 64K of the debug data. 
*/ +		pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, +			pm8001_ha->fatal_forensic_shift_offset); +		pm8001_mw32(fatal_table_address, +			MPI_FATAL_EDUMP_TABLE_HANDSHAKE, +				MPI_FATAL_EDUMP_HANDSHAKE_RDY); + +		/* Poll FDDHSHK  until clear  */ +		start = jiffies + (2 * HZ); /* 2 sec */ + +		do { +			reg_val = pm8001_mr32(fatal_table_address, +					MPI_FATAL_EDUMP_TABLE_HANDSHAKE); +		} while ((reg_val) && time_before(jiffies, start)); + +		if (reg_val != 0) { +			PM8001_FAIL_DBG(pm8001_ha, +			pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" +			" = 0x%x\n", reg_val)); +			return -EIO; +		} + +		/* Read the next 64K of the debug data. */ +		pm8001_ha->forensic_fatal_step = 0; +		if (pm8001_mr32(fatal_table_address, +			MPI_FATAL_EDUMP_TABLE_STATUS) != +				MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { +			pm8001_mw32(fatal_table_address, +				MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0); +			goto moreData; +		} else { +			pm8001_ha->forensic_info.data_buf.direct_data += +				sprintf(pm8001_ha-> +					forensic_info.data_buf.direct_data, +						"%08x ", 4); +			pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF; +			pm8001_ha->forensic_info.data_buf.direct_len =  0; +			pm8001_ha->forensic_info.data_buf.direct_offset = 0; +			pm8001_ha->forensic_info.data_buf.read_len = 0; +		} +	} + +	return (char *)pm8001_ha->forensic_info.data_buf.direct_data - +		(char *)buf; +} +  /**   * read_main_config_table - read the configure table and save it.   * @pm8001_ha: our hba card information @@ -430,7 +645,11 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)  	table is updated */  	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE);  	/* wait until Inbound DoorBell Clear Register toggled */ -	max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ +	if (IS_SPCV_12G(pm8001_ha->pdev)) { +		max_wait_count = 4 * 1000 * 1000;/* 4 sec */ +	} else { +		max_wait_count = 2 * 1000 * 1000;/* 2 sec */ +	}  	do {  		udelay(1);  		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); @@ -579,6 +798,9 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)  	pm8001_ha->pspa_q_tbl_addr =  		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) &  					0xFFFFFF); +	pm8001_ha->fatal_tbl_addr = +		base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0xA0) & +					0xFFFFFF);  	PM8001_INIT_DBG(pm8001_ha,  			pm8001_printk("GST OFFSET 0x%x\n", @@ -913,7 +1135,11 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)  	pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);  	/* wait until Inbound DoorBell Clear Register toggled */ -	max_wait_count = 2 * 1000 * 1000;	/* 2 sec for spcv/ve */ +	if (IS_SPCV_12G(pm8001_ha->pdev)) { +		max_wait_count = 4 * 1000 * 1000;/* 4 sec */ +	} else { +		max_wait_count = 2 * 1000 * 1000;/* 2 sec */ +	}  	do {  		udelay(1);  		value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); @@ -959,6 +1185,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)  {  	u32 regval;  	u32 bootloader_state; +	u32 ibutton0, ibutton1;  	/* Check if MPI is in ready state to reset */  	if (mpi_uninit_check(pm8001_ha) != 0) { @@ -1017,7 +1244,27 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)  	if (-1 == check_fw_ready(pm8001_ha)) {  		PM8001_FAIL_DBG(pm8001_ha,  			pm8001_printk("Firmware is not ready!\n")); -		return -EBUSY; +		/* check iButton feature support for motherboard controller */ +		if (pm8001_ha->pdev->subsystem_vendor != +			PCI_VENDOR_ID_ADAPTEC2 && +			pm8001_ha->pdev->subsystem_vendor != 0) { +			ibutton0 
= pm8001_cr32(pm8001_ha, 0, +					MSGU_HOST_SCRATCH_PAD_6); +			ibutton1 = pm8001_cr32(pm8001_ha, 0, +					MSGU_HOST_SCRATCH_PAD_7); +			if (!ibutton0 && !ibutton1) { +				PM8001_FAIL_DBG(pm8001_ha, +					pm8001_printk("iButton Feature is" +					" not Available!!!\n")); +				return -EBUSY; +			} +			if (ibutton0 == 0xdeadbeef && ibutton1 == 0xdeadbeef) { +				PM8001_FAIL_DBG(pm8001_ha, +					pm8001_printk("CRC Check for iButton" +					" Feature Failed!!!\n")); +				return -EBUSY; +			} +		}  	}  	PM8001_INIT_DBG(pm8001_ha,  		pm8001_printk("SPCv soft reset Complete\n")); @@ -1268,6 +1515,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)  	if (unlikely(!t || !t->lldd_task || !t->dev))  		return;  	ts = &t->task_status; +	/* Print sas address of IO failed device */ +	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && +		(status != IO_UNDERFLOW)) +		PM8001_FAIL_DBG(pm8001_ha, +			pm8001_printk("SAS Address of IO Failure Drive" +			":%016llx", SAS_ADDR(t->dev->sas_addr))); +  	switch (status) {  	case IO_SUCCESS:  		PM8001_IO_DBG(pm8001_ha, @@ -1691,6 +1945,10 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  	u32 param;  	u32 status;  	u32 tag; +	int i, j; +	u8 sata_addr_low[4]; +	u32 temp_sata_addr_low, temp_sata_addr_hi; +	u8 sata_addr_hi[4];  	struct sata_completion_resp *psataPayload;  	struct task_status_struct *ts;  	struct ata_task_resp *resp ; @@ -1740,7 +1998,47 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  			pm8001_printk("ts null\n"));  		return;  	} +	/* Print sas address of IO failed device */ +	if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && +		(status != IO_UNDERFLOW)) { +		if (!((t->dev->parent) && +			(DEV_IS_EXPANDER(t->dev->parent->dev_type)))) { +			for (i = 0 , j = 4; i <= 3 && j <= 7; i++ , j++) +				sata_addr_low[i] = pm8001_ha->sas_addr[j]; +			for (i = 0 , j = 0; i <= 3 && j <= 3; i++ , j++) +				sata_addr_hi[i] = pm8001_ha->sas_addr[j]; +			memcpy(&temp_sata_addr_low, sata_addr_low, +				sizeof(sata_addr_low)); +			memcpy(&temp_sata_addr_hi, sata_addr_hi, +				sizeof(sata_addr_hi)); +			temp_sata_addr_hi = (((temp_sata_addr_hi >> 24) & 0xff) +						|((temp_sata_addr_hi << 8) & +						0xff0000) | +						((temp_sata_addr_hi >> 8) +						& 0xff00) | +						((temp_sata_addr_hi << 24) & +						0xff000000)); +			temp_sata_addr_low = ((((temp_sata_addr_low >> 24) +						& 0xff) | +						((temp_sata_addr_low << 8) +						& 0xff0000) | +						((temp_sata_addr_low >> 8) +						& 0xff00) | +						((temp_sata_addr_low << 24) +						& 0xff000000)) + +						pm8001_dev->attached_phy + +						0x10); +			PM8001_FAIL_DBG(pm8001_ha, +				pm8001_printk("SAS Address of IO Failure Drive:" +				"%08x%08x", temp_sata_addr_hi, +					temp_sata_addr_low)); +		} else { +			PM8001_FAIL_DBG(pm8001_ha, +				pm8001_printk("SAS Address of IO Failure Drive:" +				"%016llx", SAS_ADDR(t->dev->sas_addr))); +		} +	}  	switch (status) {  	case IO_SUCCESS:  		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); @@ -1870,11 +2168,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*in order to force CPU ordering*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -1890,11 +2184,7 @@ 
mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -1916,11 +2206,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  				IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/* ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -1983,11 +2269,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  					IO_DS_NON_OPERATIONAL);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2007,11 +2289,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  					IO_DS_IN_ERROR);  			ts->resp = SAS_TASK_UNDELIVERED;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2040,20 +2318,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)  			" resp 0x%x stat 0x%x but aborted by upper layer!\n",  			t, status, ts->resp, ts->stat));  		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -	} else if (t->uldd_task) { -		spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/* ditto */ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); -	} else if (!t->uldd_task) { +	} else {  		spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/*ditto*/ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); +		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  	}  } @@ -2165,11 +2432,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)  				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);  			ts->resp = SAS_TASK_COMPLETE;  			ts->stat = SAS_QUEUE_FULL; -			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -			mb();/*ditto*/ -			spin_unlock_irq(&pm8001_ha->lock); -			t->task_done(t); -			spin_lock_irq(&pm8001_ha->lock); +			pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  			return;  		}  		break; @@ -2291,20 +2554,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)  			" resp 0x%x stat 0x%x but aborted by upper layer!\n",  			t, event, ts->resp, ts->stat));  		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -	} else if (t->uldd_task) { -		spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/* ditto */ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); -	} else if (!t->uldd_task) { +	} else {  		
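The shift cascades in the mpi_sata_completion() hunk above are plain 32-bit byte swaps of the two halves of the hba's SAS address; with the kernel helper from <linux/swab.h> the same computation reads as follows (an illustrative rewrite only, the patch keeps the explicit form):

	temp_sata_addr_hi = swab32(temp_sata_addr_hi);
	temp_sata_addr_low = swab32(temp_sata_addr_low) +
				pm8001_dev->attached_phy + 0x10;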
spin_unlock_irqrestore(&t->task_state_lock, flags); -		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); -		mb();/*ditto*/ -		spin_unlock_irq(&pm8001_ha->lock); -		t->task_done(t); -		spin_lock_irq(&pm8001_ha->lock); +		pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);  	}  } @@ -2589,6 +2841,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)  	unsigned long flags;  	u8 deviceType = pPayload->sas_identify.dev_type;  	port->port_state = portstate; +	phy->phy_state = PHY_STATE_LINK_UP_SPCV;  	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(  		"portid:%d; phyid:%d; linkrate:%d; "  		"portstate:%x; devicetype:%x\n", @@ -2673,6 +2926,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)  				port_id, phy_id, link_rate, portstate));  	port->port_state = portstate; +	phy->phy_state = PHY_STATE_LINK_UP_SPCV;  	port->port_attached = 1;  	pm8001_get_lrate_mode(phy, link_rate);  	phy->phy_type |= PORT_TYPE_SATA; @@ -3103,9 +3357,27 @@ static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)  static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha,  			void *piomb)  { -	PM8001_MSG_DBG(pm8001_ha, -			pm8001_printk(" pm80xx_addition_functionality\n")); +	u8 page_code; +	struct set_phy_profile_resp *pPayload = +		(struct set_phy_profile_resp *)(piomb + 4); +	u32 ppc_phyid = le32_to_cpu(pPayload->ppc_phyid); +	u32 status = le32_to_cpu(pPayload->status); +	page_code = (u8)((ppc_phyid & 0xFF00) >> 8); +	if (status) { +		/* status is FAILED */ +		PM8001_FAIL_DBG(pm8001_ha, +			pm8001_printk("PhyProfile command failed  with status " +			"0x%08X \n", status)); +		return -1; +	} else { +		if (page_code != SAS_PHY_ANALOG_SETTINGS_PAGE) { +			PM8001_FAIL_DBG(pm8001_ha, +				pm8001_printk("Invalid page code 0x%X\n", +					page_code)); +			return -1; +		} +	}  	return 0;  } @@ -3484,8 +3756,6 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,  	else  		pm8001_ha->smp_exp_mode = SMP_INDIRECT; -	/* DIRECT MODE support only in spcv/ve */ -	pm8001_ha->smp_exp_mode = SMP_DIRECT;  	tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));  	preq_dma_addr = (char *)phys_to_virt(tmp_addr); @@ -3501,7 +3771,7 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,  		/* exclude top 4 bytes for SMP req header */  		smp_cmd.long_smp_req.long_req_addr =  			cpu_to_le64((u64)sg_dma_address -				(&task->smp_task.smp_req) - 4); +				(&task->smp_task.smp_req) + 4);  		/* exclude 4 bytes for SMP req header and CRC */  		smp_cmd.long_smp_req.long_req_size =  			cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); @@ -3604,10 +3874,10 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,  	struct ssp_ini_io_start_req ssp_cmd;  	u32 tag = ccb->ccb_tag;  	int ret; -	u64 phys_addr; +	u64 phys_addr, start_addr, end_addr; +	u32 end_addr_high, end_addr_low;  	struct inbound_queue_table *circularQ; -	static u32 inb; -	static u32 outb; +	u32 q_index;  	u32 opc = OPC_INB_SSPINIIOSTART;  	memset(&ssp_cmd, 0, sizeof(ssp_cmd));  	memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); @@ -3626,7 +3896,8 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,  	ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7);  	memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cmd->cmnd,  		       task->ssp_task.cmd->cmd_len); -	circularQ = &pm8001_ha->inbnd_q_tbl[0]; +	q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; +	circularQ = &pm8001_ha->inbnd_q_tbl[q_index];  	/* Check if encryption is 
set */  	if (pm8001_ha->chip->encrypt && @@ -3658,6 +3929,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,  				cpu_to_le32(upper_32_bits(dma_addr));  			ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len);  			ssp_cmd.enc_esgl = 0; +			/* Check 4G Boundary */ +			start_addr = cpu_to_le64(dma_addr); +			end_addr = (start_addr + ssp_cmd.enc_len) - 1; +			end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); +			end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); +			if (end_addr_high != ssp_cmd.enc_addr_high) { +				PM8001_FAIL_DBG(pm8001_ha, +					pm8001_printk("The sg list address " +					"start_addr=0x%016llx data_len=0x%x " +					"end_addr_high=0x%08x end_addr_low=" +					"0x%08x has crossed 4G boundary\n", +						start_addr, ssp_cmd.enc_len, +						end_addr_high, end_addr_low)); +				pm8001_chip_make_sg(task->scatter, 1, +					ccb->buf_prd); +				phys_addr = ccb->ccb_dma_handle + +					offsetof(struct pm8001_ccb_info, +						buf_prd[0]); +				ssp_cmd.enc_addr_low = +					cpu_to_le32(lower_32_bits(phys_addr)); +				ssp_cmd.enc_addr_high = +					cpu_to_le32(upper_32_bits(phys_addr)); +				ssp_cmd.enc_esgl = cpu_to_le32(1<<31); +			}  		} else if (task->num_scatter == 0) {  			ssp_cmd.enc_addr_low = 0;  			ssp_cmd.enc_addr_high = 0; @@ -3674,7 +3969,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,  	} else {  		PM8001_IO_DBG(pm8001_ha, pm8001_printk(  			"Sending Normal SAS command 0x%x inb q %x\n", -			task->ssp_task.cmd->cmnd[0], inb)); +			task->ssp_task.cmd->cmnd[0], q_index));  		/* fill in PRD (scatter/gather) table, if any */  		if (task->num_scatter > 1) {  			pm8001_chip_make_sg(task->scatter, ccb->n_elem, @@ -3693,6 +3988,30 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,  				cpu_to_le32(upper_32_bits(dma_addr));  			ssp_cmd.len = cpu_to_le32(task->total_xfer_len);  			ssp_cmd.esgl = 0; +			/* Check 4G Boundary */ +			start_addr = cpu_to_le64(dma_addr); +			end_addr = (start_addr + ssp_cmd.len) - 1; +			end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); +			end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); +			if (end_addr_high != ssp_cmd.addr_high) { +				PM8001_FAIL_DBG(pm8001_ha, +					pm8001_printk("The sg list address " +					"start_addr=0x%016llx data_len=0x%x " +					"end_addr_high=0x%08x end_addr_low=" +					"0x%08x has crossed 4G boundary\n", +						 start_addr, ssp_cmd.len, +						 end_addr_high, end_addr_low)); +				pm8001_chip_make_sg(task->scatter, 1, +					ccb->buf_prd); +				phys_addr = ccb->ccb_dma_handle + +					offsetof(struct pm8001_ccb_info, +						 buf_prd[0]); +				ssp_cmd.addr_low = +					cpu_to_le32(lower_32_bits(phys_addr)); +				ssp_cmd.addr_high = +					cpu_to_le32(upper_32_bits(phys_addr)); +				ssp_cmd.esgl = cpu_to_le32(1<<31); +			}  		} else if (task->num_scatter == 0) {  			ssp_cmd.addr_low = 0;  			ssp_cmd.addr_high = 0; @@ -3700,11 +4019,9 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,  			ssp_cmd.esgl = 0;  		}  	} -	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); - -	/* rotate the outb queue */ -	outb = outb%PM8001_MAX_SPCV_OUTB_NUM; - +	q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; +	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, +						&ssp_cmd, q_index);  	return ret;  } @@ -3716,18 +4033,19 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,  	struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;  	u32 tag = ccb->ccb_tag;  	int ret; -	static u32 inb; -	static 
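The "Check 4G Boundary" hunks above all enforce the same rule: a flat DMA segment handed to the controller must not straddle a 4 GiB boundary, which is exactly the case where the upper 32 bits of the first and last byte addresses differ; when they do, the request falls back to a one-entry scatter list with bit 31 of the esgl field set. The predicate as a hedged sketch (pm80xx_crosses_4g() is not a function in the patch):

	static bool pm80xx_crosses_4g(u64 start, u32 len)
	{
		/* true when the segment spans a 4 GiB boundary */
		return upper_32_bits(start) != upper_32_bits(start + len - 1);
	}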
u32 outb; +	u32 q_index;  	struct sata_start_req sata_cmd;  	u32 hdr_tag, ncg_tag = 0; -	u64 phys_addr; +	u64 phys_addr, start_addr, end_addr; +	u32 end_addr_high, end_addr_low;  	u32 ATAP = 0x0;  	u32 dir;  	struct inbound_queue_table *circularQ;  	unsigned long flags;  	u32 opc = OPC_INB_SATA_HOST_OPSTART;  	memset(&sata_cmd, 0, sizeof(sata_cmd)); -	circularQ = &pm8001_ha->inbnd_q_tbl[0]; +	q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM; +	circularQ = &pm8001_ha->inbnd_q_tbl[q_index];  	if (task->data_dir == PCI_DMA_NONE) {  		ATAP = 0x04; /* no data*/ @@ -3788,6 +4106,31 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,  			sata_cmd.enc_addr_high = upper_32_bits(dma_addr);  			sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len);  			sata_cmd.enc_esgl = 0; +			/* Check 4G Boundary */ +			start_addr = cpu_to_le64(dma_addr); +			end_addr = (start_addr + sata_cmd.enc_len) - 1; +			end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); +			end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); +			if (end_addr_high != sata_cmd.enc_addr_high) { +				PM8001_FAIL_DBG(pm8001_ha, +					pm8001_printk("The sg list address " +					"start_addr=0x%016llx data_len=0x%x " +					"end_addr_high=0x%08x end_addr_low" +					"=0x%08x has crossed 4G boundary\n", +						start_addr, sata_cmd.enc_len, +						end_addr_high, end_addr_low)); +				pm8001_chip_make_sg(task->scatter, 1, +					ccb->buf_prd); +				phys_addr = ccb->ccb_dma_handle + +						offsetof(struct pm8001_ccb_info, +						buf_prd[0]); +				sata_cmd.enc_addr_low = +					lower_32_bits(phys_addr); +				sata_cmd.enc_addr_high = +					upper_32_bits(phys_addr); +				sata_cmd.enc_esgl = +					cpu_to_le32(1 << 31); +			}  		} else if (task->num_scatter == 0) {  			sata_cmd.enc_addr_low = 0;  			sata_cmd.enc_addr_high = 0; @@ -3808,7 +4151,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,  	} else {  		PM8001_IO_DBG(pm8001_ha, pm8001_printk(  			"Sending Normal SATA command 0x%x inb %x\n", -			sata_cmd.sata_fis.command, inb)); +			sata_cmd.sata_fis.command, q_index));  		/* dad (bit 0-1) is 0 */  		sata_cmd.ncqtag_atap_dir_m_dad =  			cpu_to_le32(((ncg_tag & 0xff)<<16) | @@ -3829,6 +4172,30 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,  			sata_cmd.addr_high = upper_32_bits(dma_addr);  			sata_cmd.len = cpu_to_le32(task->total_xfer_len);  			sata_cmd.esgl = 0; +			/* Check 4G Boundary */ +			start_addr = cpu_to_le64(dma_addr); +			end_addr = (start_addr + sata_cmd.len) - 1; +			end_addr_low = cpu_to_le32(lower_32_bits(end_addr)); +			end_addr_high = cpu_to_le32(upper_32_bits(end_addr)); +			if (end_addr_high != sata_cmd.addr_high) { +				PM8001_FAIL_DBG(pm8001_ha, +					pm8001_printk("The sg list address " +					"start_addr=0x%016llx data_len=0x%x" +					"end_addr_high=0x%08x end_addr_low=" +					"0x%08x has crossed 4G boundary\n", +						start_addr, sata_cmd.len, +						end_addr_high, end_addr_low)); +				pm8001_chip_make_sg(task->scatter, 1, +					ccb->buf_prd); +				phys_addr = ccb->ccb_dma_handle + +					offsetof(struct pm8001_ccb_info, +					buf_prd[0]); +				sata_cmd.addr_low = +					lower_32_bits(phys_addr); +				sata_cmd.addr_high = +					upper_32_bits(phys_addr); +				sata_cmd.esgl = cpu_to_le32(1 << 31); +			}  		} else if (task->num_scatter == 0) {  			sata_cmd.addr_low = 0;  			sata_cmd.addr_high = 0; @@ -3884,33 +4251,18 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,  					"\n", task, ts->resp, ts->stat));  				
pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);  				return 0; -			} else if (task->uldd_task) { -				spin_unlock_irqrestore(&task->task_state_lock, -							flags); -				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); -				mb();/* ditto */ -				spin_unlock_irq(&pm8001_ha->lock); -				task->task_done(task); -				spin_lock_irq(&pm8001_ha->lock); -				return 0; -			} else if (!task->uldd_task) { +			} else {  				spin_unlock_irqrestore(&task->task_state_lock,  							flags); -				pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); -				mb();/*ditto*/ -				spin_unlock_irq(&pm8001_ha->lock); -				task->task_done(task); -				spin_lock_irq(&pm8001_ha->lock); +				pm8001_ccb_task_free_done(pm8001_ha, task, +								ccb, tag);  				return 0;  			}  		}  	} - +	q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM;  	ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, -						&sata_cmd, outb++); - -	/* rotate the outb queue */ -	outb = outb%PM8001_MAX_SPCV_OUTB_NUM; +						&sata_cmd, q_index);  	return ret;  } @@ -3941,9 +4293,16 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)  	 ** [14]	0b disable spin up hold; 1b enable spin up hold  	 ** [15] ob no change in current PHY analig setup 1b enable using SPAST  	 */ -	payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | -			LINKMODE_AUTO | LINKRATE_15 | -			LINKRATE_30 | LINKRATE_60 | phy_id); +	if (!IS_SPCV_12G(pm8001_ha->pdev)) +		payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | +				LINKMODE_AUTO | LINKRATE_15 | +				LINKRATE_30 | LINKRATE_60 | phy_id); +	else +		payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | +				LINKMODE_AUTO | LINKRATE_15 | +				LINKRATE_30 | LINKRATE_60 | LINKRATE_120 | +				phy_id); +  	/* SSC Disable and SAS Analog ST configuration */  	/**  	payload.ase_sh_lm_slr_phyid = @@ -4102,6 +4461,45 @@ pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec)  	return IRQ_HANDLED;  } +void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, +	u32 operation, u32 phyid, u32 length, u32 *buf) +{ +	u32 tag , i, j = 0; +	int rc; +	struct set_phy_profile_req payload; +	struct inbound_queue_table *circularQ; +	u32 opc = OPC_INB_SET_PHY_PROFILE; + +	memset(&payload, 0, sizeof(payload)); +	rc = pm8001_tag_alloc(pm8001_ha, &tag); +	if (rc) +		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Invalid tag\n")); +	circularQ = &pm8001_ha->inbnd_q_tbl[0]; +	payload.tag = cpu_to_le32(tag); +	payload.ppc_phyid = (((operation & 0xF) << 8) | (phyid  & 0xFF)); +	PM8001_INIT_DBG(pm8001_ha, +		pm8001_printk(" phy profile command for phy %x ,length is %d\n", +			payload.ppc_phyid, length)); +	for (i = length; i < (length + PHY_DWORD_LENGTH - 1); i++) { +		payload.reserved[j] =  cpu_to_le32(*((u32 *)buf + i)); +		j++; +	} +	pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); +} + +void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha, +	u32 length, u8 *buf) +{ +	u32 page_code, i; + +	page_code = SAS_PHY_ANALOG_SETTINGS_PAGE; +	for (i = 0; i < pm8001_ha->chip->n_phy; i++) { +		mpi_set_phy_profile_req(pm8001_ha, +			SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf); +		length = length + PHY_DWORD_LENGTH; +	} +	PM8001_INIT_DBG(pm8001_ha, pm8001_printk("phy settings completed\n")); +}  const struct pm8001_dispatch pm8001_80xx_dispatch = {  	.name			= "pmc80xx",  	.chip_init		= pm80xx_chip_init, diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index 2b760ba75d7..9970a385795 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ 
b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -168,6 +168,11 @@  #define LINKRATE_15			(0x01 << 8)  #define LINKRATE_30			(0x02 << 8)  #define LINKRATE_60			(0x06 << 8) +#define LINKRATE_120			(0x08 << 8) + +/* phy_profile */ +#define SAS_PHY_ANALOG_SETTINGS_PAGE	0x04 +#define PHY_DWORD_LENGTH		0xC  /* Thermal related */  #define	THERMAL_ENABLE			0x1 @@ -210,6 +215,8 @@  #define SAS_DOPNRJT_RTRY_TMO            128  #define SAS_COPNRJT_RTRY_TMO            128 +/* for phy state */ +#define PHY_STATE_LINK_UP_SPCV		0x2  /*    Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.    Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 @@ -1223,10 +1230,10 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;  /* MSGU CONFIGURATION TABLE*/ -#define SPCv_MSGU_CFG_TABLE_UPDATE		0x01 -#define SPCv_MSGU_CFG_TABLE_RESET		0x02 -#define SPCv_MSGU_CFG_TABLE_FREEZE		0x04 -#define SPCv_MSGU_CFG_TABLE_UNFREEZE		0x08 +#define SPCv_MSGU_CFG_TABLE_UPDATE		0x001 +#define SPCv_MSGU_CFG_TABLE_RESET		0x002 +#define SPCv_MSGU_CFG_TABLE_FREEZE		0x004 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE		0x008  #define MSGU_IBDB_SET				0x00  #define MSGU_HOST_INT_STATUS			0x08  #define MSGU_HOST_INT_MASK			0x0C @@ -1520,4 +1527,6 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;  #define DEVREG_FAILURE_PORT_NOT_VALID_STATE		0x06  #define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID		0x07 + +#define MEMBASE_II_SHIFT_REGISTER       0x1010  #endif diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 1eb7b0280a4..be8ce54f99b 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -1404,11 +1404,22 @@ enum {  };  #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1) +static struct genl_multicast_group pmcraid_mcgrps[] = { +	{ .name = "events", /* not really used - see ID discussion below */ }, +}; +  static struct genl_family pmcraid_event_family = { -	.id = GENL_ID_GENERATE, +	/* +	 * Due to prior multicast group abuse (the code having assumed that +	 * the family ID can be used as a multicast group ID) we need to +	 * statically allocate a family (and thus group) ID. +	 */ +	.id = GENL_ID_PMCRAID,  	.name = "pmcraid",  	.version = 1, -	.maxattr = PMCRAID_AEN_ATTR_MAX +	.maxattr = PMCRAID_AEN_ATTR_MAX, +	.mcgrps = pmcraid_mcgrps, +	.n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),  };  /** @@ -1511,8 +1522,8 @@ static int pmcraid_notify_aen(  		return result;  	} -	result = -		genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC); +	result = genlmsg_multicast(&pmcraid_event_family, skb, +				   0, 0, GFP_ATOMIC);  	/* If there are no listeners, genlmsg_multicast may return non-zero  	 * value. 
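With .mcgrps/.n_mcgrps filled in and the statically allocated GENL_ID_PMCRAID, a single registration call wires up both the pmcraid family and its multicast group, and genlmsg_multicast() now takes the family pointer plus a group index (0 here) rather than a global group id. The registration side reduces to essentially this (a sketch; the driver's actual init helper also logs failures):

	static int pmcraid_netlink_init(void)
	{
		/* registers the family and, implicitly, pmcraid_mcgrps[] */
		return genl_register_family(&pmcraid_event_family);
	}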
@@ -4314,6 +4325,7 @@ static struct scsi_host_template pmcraid_host_template = {  	.this_id = -1,  	.sg_tablesize = PMCRAID_MAX_IOADLS,  	.max_sectors = PMCRAID_IOA_MAX_SECTORS, +	.no_write_same = 1,  	.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,  	.use_clustering = ENABLE_CLUSTERING,  	.shost_attrs = pmcraid_host_attrs, @@ -6049,7 +6061,6 @@ out_release_regions:  out_disable_device:  	atomic_dec(&pmcraid_adapter_count); -	pci_set_drvdata(pdev, NULL);  	pci_disable_device(pdev);  	return -ENODEV;  } diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 5a522c5bbd4..158020522df 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -379,14 +379,7 @@  #define  DEBUG_PRINT_NVRAM	0  #define  DEBUG_QLA1280		0 -/* - * The SGI VISWS is broken and doesn't support MMIO ;-( - */ -#ifdef CONFIG_X86_VISWS -#define	MEMORY_MAPPED_IO	0 -#else  #define	MEMORY_MAPPED_IO	1 -#endif  #include "qla1280.h" @@ -2502,7 +2495,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)  	/* Issue set host interrupt command. */  	/* set up a timer just in case we're really jammed */ -	init_timer(&timer); +	init_timer_on_stack(&timer);  	timer.expires = jiffies + 20*HZ;  	timer.data = (unsigned long)ha;  	timer.function = qla1280_mailbox_timeout; diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile index ff0fc7c7812..44def6bb4bb 100644 --- a/drivers/scsi/qla2xxx/Makefile +++ b/drivers/scsi/qla2xxx/Makefile @@ -1,6 +1,6 @@  qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \  		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ -        qla_nx.o qla_mr.o qla_nx2.o qla_target.o +		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o  obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o  obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 5f174b83f56..16fe5196e6d 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
*/ @@ -147,6 +147,92 @@ static struct bin_attribute sysfs_fw_dump_attr = {  };  static ssize_t +qla2x00_sysfs_read_fw_dump_template(struct file *filp, struct kobject *kobj, +			   struct bin_attribute *bin_attr, +			   char *buf, loff_t off, size_t count) +{ +	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, +	    struct device, kobj))); +	struct qla_hw_data *ha = vha->hw; + +	if (!ha->fw_dump_template || !ha->fw_dump_template_len) +		return 0; + +	ql_dbg(ql_dbg_user, vha, 0x70e2, +	    "chunk <- off=%llx count=%zx\n", off, count); +	return memory_read_from_buffer(buf, count, &off, +	    ha->fw_dump_template, ha->fw_dump_template_len); +} + +static ssize_t +qla2x00_sysfs_write_fw_dump_template(struct file *filp, struct kobject *kobj, +			    struct bin_attribute *bin_attr, +			    char *buf, loff_t off, size_t count) +{ +	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, +	    struct device, kobj))); +	struct qla_hw_data *ha = vha->hw; +	uint32_t size; + +	if (off == 0) { +		if (ha->fw_dump) +			vfree(ha->fw_dump); +		if (ha->fw_dump_template) +			vfree(ha->fw_dump_template); + +		ha->fw_dump = NULL; +		ha->fw_dump_len = 0; +		ha->fw_dump_template = NULL; +		ha->fw_dump_template_len = 0; + +		size = qla27xx_fwdt_template_size(buf); +		ql_dbg(ql_dbg_user, vha, 0x70d1, +		    "-> allocating fwdt (%x bytes)...\n", size); +		ha->fw_dump_template = vmalloc(size); +		if (!ha->fw_dump_template) { +			ql_log(ql_log_warn, vha, 0x70d2, +			    "Failed allocate fwdt (%x bytes).\n", size); +			return -ENOMEM; +		} +		ha->fw_dump_template_len = size; +	} + +	if (off + count > ha->fw_dump_template_len) { +		count = ha->fw_dump_template_len - off; +		ql_dbg(ql_dbg_user, vha, 0x70d3, +		    "chunk -> truncating to %zx bytes.\n", count); +	} + +	ql_dbg(ql_dbg_user, vha, 0x70d4, +	    "chunk -> off=%llx count=%zx\n", off, count); +	memcpy(ha->fw_dump_template + off, buf, count); + +	if (off + count == ha->fw_dump_template_len) { +		size = qla27xx_fwdt_calculate_dump_size(vha); +		ql_dbg(ql_dbg_user, vha, 0x70d5, +		    "-> allocating fwdump (%x bytes)...\n", size); +		ha->fw_dump = vmalloc(size); +		if (!ha->fw_dump) { +			ql_log(ql_log_warn, vha, 0x70d6, +			    "Failed allocate fwdump (%x bytes).\n", size); +			return -ENOMEM; +		} +		ha->fw_dump_len = size; +	} + +	return count; +} +static struct bin_attribute sysfs_fw_dump_template_attr = { +	.attr = { +		.name = "fw_dump_template", +		.mode = S_IRUSR | S_IWUSR, +	}, +	.size = 0, +	.read = qla2x00_sysfs_read_fw_dump_template, +	.write = qla2x00_sysfs_write_fw_dump_template, +}; + +static ssize_t  qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,  			 struct bin_attribute *bin_attr,  			 char *buf, loff_t off, size_t count) @@ -241,12 +327,17 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,  	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,  	    struct device, kobj)));  	struct qla_hw_data *ha = vha->hw; +	ssize_t rval = 0;  	if (ha->optrom_state != QLA_SREADING)  		return 0; -	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, -					ha->optrom_region_size); +	mutex_lock(&ha->optrom_mutex); +	rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer, +	    ha->optrom_region_size); +	mutex_unlock(&ha->optrom_mutex); + +	return rval;  }  static ssize_t @@ -265,7 +356,9 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,  	if (off + count > ha->optrom_region_size)  		count = ha->optrom_region_size - off; +	
mutex_lock(&ha->optrom_mutex);  	memcpy(&ha->optrom_buffer[off], buf, count); +	mutex_unlock(&ha->optrom_mutex);  	return count;  } @@ -288,10 +381,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,  	    struct device, kobj)));  	struct qla_hw_data *ha = vha->hw; -  	uint32_t start = 0;  	uint32_t size = ha->optrom_size;  	int val, valid; +	ssize_t rval = count;  	if (off)  		return -EINVAL; @@ -304,12 +397,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  	if (start > ha->optrom_size)  		return -EINVAL; +	mutex_lock(&ha->optrom_mutex);  	switch (val) {  	case 0:  		if (ha->optrom_state != QLA_SREADING && -		    ha->optrom_state != QLA_SWRITING) -			return -EINVAL; - +		    ha->optrom_state != QLA_SWRITING) { +			rval =  -EINVAL; +			goto out; +		}  		ha->optrom_state = QLA_SWAITING;  		ql_dbg(ql_dbg_user, vha, 0x7061, @@ -320,8 +415,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  		ha->optrom_buffer = NULL;  		break;  	case 1: -		if (ha->optrom_state != QLA_SWAITING) -			return -EINVAL; +		if (ha->optrom_state != QLA_SWAITING) { +			rval = -EINVAL; +			goto out; +		}  		ha->optrom_region_start = start;  		ha->optrom_region_size = start + size > ha->optrom_size ? @@ -335,13 +432,15 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  			    "(%x).\n", ha->optrom_region_size);  			ha->optrom_state = QLA_SWAITING; -			return -ENOMEM; +			rval = -ENOMEM; +			goto out;  		}  		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {  			ql_log(ql_log_warn, vha, 0x7063,  			    "HBA not online, failing NVRAM update.\n"); -			return -EAGAIN; +			rval = -EAGAIN; +			goto out;  		}  		ql_dbg(ql_dbg_user, vha, 0x7064, @@ -353,8 +452,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  		    ha->optrom_region_start, ha->optrom_region_size);  		break;  	case 2: -		if (ha->optrom_state != QLA_SWAITING) -			return -EINVAL; +		if (ha->optrom_state != QLA_SWAITING) { +			rval = -EINVAL; +			goto out; +		}  		/*  		 * We need to be more restrictive on which FLASH regions are @@ -388,7 +489,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  		if (!valid) {  			ql_log(ql_log_warn, vha, 0x7065,  			    "Invalid start region 0x%x/0x%x.\n", start, size); -			return -EINVAL; +			rval = -EINVAL; +			goto out;  		}  		ha->optrom_region_start = start; @@ -403,7 +505,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  			    "(%x)\n", ha->optrom_region_size);  			ha->optrom_state = QLA_SWAITING; -			return -ENOMEM; +			rval = -ENOMEM; +			goto out;  		}  		ql_dbg(ql_dbg_user, vha, 0x7067, @@ -413,13 +516,16 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  		memset(ha->optrom_buffer, 0, ha->optrom_region_size);  		break;  	case 3: -		if (ha->optrom_state != QLA_SWRITING) -			return -EINVAL; +		if (ha->optrom_state != QLA_SWRITING) { +			rval = -EINVAL; +			goto out; +		}  		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {  			ql_log(ql_log_warn, vha, 0x7068,  			    "HBA not online, failing flash update.\n"); -			return -EAGAIN; +			rval = -EAGAIN; +			goto out;  		}  		ql_dbg(ql_dbg_user, vha, 0x7069, @@ -430,9 +536,12 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,  		    ha->optrom_region_start, ha->optrom_region_size);  		break;  	default: -		return -EINVAL; +		rval = -EINVAL;  	} -	return count; 
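The qla2x00_sysfs_write_optrom_ctl() conversion above follows one pattern throughout: each early "return -E...;" becomes an assignment to rval plus a jump to a single unlock site, so the optrom_mutex taken at entry is released on every exit path. In outline (a sketch of the pattern, not additional code from the patch):

	mutex_lock(&ha->optrom_mutex);
	if (ha->optrom_state != QLA_SWAITING) {
		rval = -EINVAL;
		goto out;	/* never return with the mutex held */
	}
	/* ... state transition work ... */
out:
	mutex_unlock(&ha->optrom_mutex);
	return rval;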
+ +out: +	mutex_unlock(&ha->optrom_mutex); +	return rval;  }  static struct bin_attribute sysfs_optrom_ctl_attr = { @@ -555,7 +664,7 @@ do_read:  		}  		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data, -		    addr, offset, SFP_BLOCK_SIZE, 0); +		    addr, offset, SFP_BLOCK_SIZE, BIT_1);  		if (rval != QLA_SUCCESS) {  			ql_log(ql_log_warn, vha, 0x706d,  			    "Unable to read SFP data (%x/%x/%x).\n", rval, @@ -822,6 +931,7 @@ static struct sysfs_entry {  	int is4GBp_only;  } bin_file_entries[] = {  	{ "fw_dump", &sysfs_fw_dump_attr, }, +	{ "fw_dump_template", &sysfs_fw_dump_template_attr, 0x27 },  	{ "nvram", &sysfs_nvram_attr, },  	{ "optrom", &sysfs_optrom_attr, },  	{ "optrom_ctl", &sysfs_optrom_ctl_attr, }, @@ -847,6 +957,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)  			continue;  		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))  			continue; +		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) +			continue;  		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,  		    iter->attr); @@ -862,7 +974,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)  }  void -qla2x00_free_sysfs_attr(scsi_qla_host_t *vha) +qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)  {  	struct Scsi_Host *host = vha->host;  	struct sysfs_entry *iter; @@ -880,7 +992,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)  		    iter->attr);  	} -	if (ha->beacon_blink_led == 1) +	if (stop_beacon && ha->beacon_blink_led == 1)  		ha->isp_ops->beacon_off(vha);  } @@ -890,7 +1002,7 @@ static ssize_t  qla2x00_drvr_version_show(struct device *dev,  			  struct device_attribute *attr, char *buf)  { -	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str); +	return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);  }  static ssize_t @@ -901,7 +1013,7 @@ qla2x00_fw_version_show(struct device *dev,  	struct qla_hw_data *ha = vha->hw;  	char fw_str[128]; -	return snprintf(buf, PAGE_SIZE, "%s\n", +	return scnprintf(buf, PAGE_SIZE, "%s\n",  	    ha->isp_ops->fw_version_str(vha, fw_str));  } @@ -914,15 +1026,15 @@ qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,  	uint32_t sn;  	if (IS_QLAFX00(vha->hw)) { -		return snprintf(buf, PAGE_SIZE, "%s\n", +		return scnprintf(buf, PAGE_SIZE, "%s\n",  		    vha->hw->mr.serial_num);  	} else if (IS_FWI2_CAPABLE(ha)) { -		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE); -		return snprintf(buf, PAGE_SIZE, "%s\n", buf); +		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1); +		return strlen(strcat(buf, "\n"));  	}  	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; -	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000, +	return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,  	    sn % 100000);  } @@ -931,7 +1043,7 @@ qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,  		      char *buf)  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); -	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device); +	return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);  }  static ssize_t @@ -942,10 +1054,10 @@ qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,  	struct qla_hw_data *ha = vha->hw;  	if (IS_QLAFX00(vha->hw)) -		return snprintf(buf, PAGE_SIZE, "%s\n", +		return scnprintf(buf, PAGE_SIZE, "%s\n",  		    vha->hw->mr.hw_version); -	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n", +	return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",  	    ha->product_id[0], ha->product_id[1], 
ha->product_id[2],  	    ha->product_id[3]);  } @@ -956,11 +1068,7 @@ qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); -	if (IS_QLAFX00(vha->hw)) -		return snprintf(buf, PAGE_SIZE, "%s\n", -		    vha->hw->mr.product_name); - -	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number); +	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);  }  static ssize_t @@ -968,7 +1076,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,  			char *buf)  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); -	return snprintf(buf, PAGE_SIZE, "%s\n", +	return scnprintf(buf, PAGE_SIZE, "%s\n",  	    vha->hw->model_desc ? vha->hw->model_desc : "");  } @@ -979,7 +1087,7 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	char pci_info[30]; -	return snprintf(buf, PAGE_SIZE, "%s\n", +	return scnprintf(buf, PAGE_SIZE, "%s\n",  	    vha->hw->isp_ops->pci_info_str(vha, pci_info));  } @@ -994,29 +1102,29 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,  	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||  	    atomic_read(&vha->loop_state) == LOOP_DEAD ||  	    vha->device_flags & DFLG_NO_CABLE) -		len = snprintf(buf, PAGE_SIZE, "Link Down\n"); +		len = scnprintf(buf, PAGE_SIZE, "Link Down\n");  	else if (atomic_read(&vha->loop_state) != LOOP_READY ||  	    qla2x00_reset_active(vha)) -		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n"); +		len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");  	else { -		len = snprintf(buf, PAGE_SIZE, "Link Up - "); +		len = scnprintf(buf, PAGE_SIZE, "Link Up - ");  		switch (ha->current_topology) {  		case ISP_CFG_NL: -			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n"); +			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");  			break;  		case ISP_CFG_FL: -			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n"); +			len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");  			break;  		case ISP_CFG_N: -			len += snprintf(buf + len, PAGE_SIZE-len, +			len += scnprintf(buf + len, PAGE_SIZE-len,  			    "N_Port to N_Port\n");  			break;  		case ISP_CFG_F: -			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n"); +			len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");  			break;  		default: -			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n"); +			len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");  			break;  		}  	} @@ -1032,10 +1140,10 @@ qla2x00_zio_show(struct device *dev, struct device_attribute *attr,  	switch (vha->hw->zio_mode) {  	case QLA_ZIO_MODE_6: -		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n"); +		len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");  		break;  	case QLA_ZIO_DISABLED: -		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); +		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");  		break;  	}  	return len; @@ -1075,7 +1183,7 @@ qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); -	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100); +	return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);  }  static ssize_t @@ -1105,9 +1213,9 @@ qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,  	int len = 0;  	if (vha->hw->beacon_blink_led) -		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n"); +		len += scnprintf(buf + len, PAGE_SIZE-len, 
"Enabled\n");  	else -		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n"); +		len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");  	return len;  } @@ -1149,7 +1257,7 @@ qla2x00_optrom_bios_version_show(struct device *dev,  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	struct qla_hw_data *ha = vha->hw; -	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1], +	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],  	    ha->bios_revision[0]);  } @@ -1159,7 +1267,7 @@ qla2x00_optrom_efi_version_show(struct device *dev,  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	struct qla_hw_data *ha = vha->hw; -	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1], +	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],  	    ha->efi_revision[0]);  } @@ -1169,7 +1277,7 @@ qla2x00_optrom_fcode_version_show(struct device *dev,  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	struct qla_hw_data *ha = vha->hw; -	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1], +	return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],  	    ha->fcode_revision[0]);  } @@ -1179,7 +1287,7 @@ qla2x00_optrom_fw_version_show(struct device *dev,  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	struct qla_hw_data *ha = vha->hw; -	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n", +	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",  	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],  	    ha->fw_revision[3]);  } @@ -1191,10 +1299,10 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	struct qla_hw_data *ha = vha->hw; -	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) -		return snprintf(buf, PAGE_SIZE, "\n"); +	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha)) +		return scnprintf(buf, PAGE_SIZE, "\n"); -	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", +	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",  	    ha->gold_fw_version[0], ha->gold_fw_version[1],  	    ha->gold_fw_version[2], ha->gold_fw_version[3]);  } @@ -1204,7 +1312,7 @@ qla2x00_total_isp_aborts_show(struct device *dev,  			      struct device_attribute *attr, char *buf)  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); -	return snprintf(buf, PAGE_SIZE, "%d\n", +	return scnprintf(buf, PAGE_SIZE, "%d\n",  	    vha->qla_stats.total_isp_aborts);  } @@ -1218,16 +1326,16 @@ qla24xx_84xx_fw_version_show(struct device *dev,  	struct qla_hw_data *ha = vha->hw;  	if (!IS_QLA84XX(ha)) -		return snprintf(buf, PAGE_SIZE, "\n"); +		return scnprintf(buf, PAGE_SIZE, "\n");  	if (ha->cs84xx->op_fw_version == 0)  		rval = qla84xx_verify_chip(vha, status);  	if ((rval == QLA_SUCCESS) && (status[0] == 0)) -		return snprintf(buf, PAGE_SIZE, "%u\n", +		return scnprintf(buf, PAGE_SIZE, "%u\n",  			(uint32_t)ha->cs84xx->op_fw_version); -	return snprintf(buf, PAGE_SIZE, "\n"); +	return scnprintf(buf, PAGE_SIZE, "\n");  }  static ssize_t @@ -1238,9 +1346,9 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,  	struct qla_hw_data *ha = vha->hw;  	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha)) -		return snprintf(buf, PAGE_SIZE, "\n"); +		return scnprintf(buf, PAGE_SIZE, "\n"); -	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", +	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",  	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],  	    ha->mpi_capabilities);  } @@ -1253,9 
+1361,9 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,  	struct qla_hw_data *ha = vha->hw;  	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) -		return snprintf(buf, PAGE_SIZE, "\n"); +		return scnprintf(buf, PAGE_SIZE, "\n"); -	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", +	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",  	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);  } @@ -1266,7 +1374,7 @@ qla2x00_flash_block_size_show(struct device *dev,  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	struct qla_hw_data *ha = vha->hw; -	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size); +	return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);  }  static ssize_t @@ -1276,9 +1384,9 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	if (!IS_CNA_CAPABLE(vha->hw)) -		return snprintf(buf, PAGE_SIZE, "\n"); +		return scnprintf(buf, PAGE_SIZE, "\n"); -	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); +	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);  }  static ssize_t @@ -1288,9 +1396,9 @@ qla2x00_vn_port_mac_address_show(struct device *dev,  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	if (!IS_CNA_CAPABLE(vha->hw)) -		return snprintf(buf, PAGE_SIZE, "\n"); +		return scnprintf(buf, PAGE_SIZE, "\n"); -	return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac); +	return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);  }  static ssize_t @@ -1299,7 +1407,7 @@ qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,  {  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); -	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap); +	return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);  }  static ssize_t @@ -1320,10 +1428,10 @@ qla2x00_thermal_temp_show(struct device *dev,  	}  	if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS) -		return snprintf(buf, PAGE_SIZE, "%d\n", temp); +		return scnprintf(buf, PAGE_SIZE, "%d\n", temp);  done: -	return snprintf(buf, PAGE_SIZE, "\n"); +	return scnprintf(buf, PAGE_SIZE, "\n");  }  static ssize_t @@ -1337,7 +1445,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,  	if (IS_QLAFX00(vha->hw)) {  		pstate = qlafx00_fw_state_show(dev, attr, buf); -		return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate); +		return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);  	}  	if (qla2x00_reset_active(vha)) @@ -1348,7 +1456,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,  	if (rval != QLA_SUCCESS)  		memset(state, -1, sizeof(state)); -	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], +	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],  	    state[1], state[2], state[3], state[4]);  } @@ -1359,9 +1467,9 @@ qla2x00_diag_requests_show(struct device *dev,  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	if (!IS_BIDI_CAPABLE(vha->hw)) -		return snprintf(buf, PAGE_SIZE, "\n"); +		return scnprintf(buf, PAGE_SIZE, "\n"); -	return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count); +	return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);  }  static ssize_t @@ -1371,9 +1479,9 @@ qla2x00_diag_megabytes_show(struct device *dev,  	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));  	if (!IS_BIDI_CAPABLE(vha->hw)) -		return snprintf(buf, PAGE_SIZE, "\n"); +		return scnprintf(buf, PAGE_SIZE, "\n"); -	return 
snprintf(buf, PAGE_SIZE, "%llu\n", +	return scnprintf(buf, PAGE_SIZE, "%llu\n",  	    vha->bidi_stats.transfer_bytes >> 20);  } @@ -1387,12 +1495,43 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,  	if (!ha->fw_dumped)  		size = 0; -	else if (IS_QLA82XX(ha)) +	else if (IS_P3P_TYPE(ha))  		size = ha->md_template_size + ha->md_dump_size;  	else  		size = ha->fw_dump_len; -	return snprintf(buf, PAGE_SIZE, "%d\n", size); +	return scnprintf(buf, PAGE_SIZE, "%d\n", size); +} + +static ssize_t +qla2x00_allow_cna_fw_dump_show(struct device *dev, +	struct device_attribute *attr, char *buf) +{ +	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + +	if (!IS_P3P_TYPE(vha->hw)) +		return scnprintf(buf, PAGE_SIZE, "\n"); +	else +		return scnprintf(buf, PAGE_SIZE, "%s\n", +		    vha->hw->allow_cna_fw_dump ? "true" : "false"); +} + +static ssize_t +qla2x00_allow_cna_fw_dump_store(struct device *dev, +	struct device_attribute *attr, const char *buf, size_t count) +{ +	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); +	int val = 0; + +	if (!IS_P3P_TYPE(vha->hw)) +		return -EINVAL; + +	if (sscanf(buf, "%d", &val) != 1) +		return -EINVAL; + +	vha->hw->allow_cna_fw_dump = val != 0; + +	return strlen(buf);  }  static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL); @@ -1436,6 +1575,9 @@ static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);  static DEVICE_ATTR(diag_requests, S_IRUGO, qla2x00_diag_requests_show, NULL);  static DEVICE_ATTR(diag_megabytes, S_IRUGO, qla2x00_diag_megabytes_show, NULL);  static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL); +static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, +		   qla2x00_allow_cna_fw_dump_show, +		   qla2x00_allow_cna_fw_dump_store);  struct device_attribute *qla2x00_host_attrs[] = {  	&dev_attr_driver_version, @@ -1468,6 +1610,7 @@ struct device_attribute *qla2x00_host_attrs[] = {  	&dev_attr_diag_requests,  	&dev_attr_diag_megabytes,  	&dev_attr_fw_dump_size, +	&dev_attr_allow_cna_fw_dump,  	NULL,  }; @@ -1513,6 +1656,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)  	case PORT_SPEED_16GB:  		speed = FC_PORTSPEED_16GBIT;  		break; +	case PORT_SPEED_32GB: +		speed = FC_PORTSPEED_32GBIT; +		break;  	}  	fc_host_speed(shost) = speed;  } @@ -1994,6 +2140,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)  	vha->flags.delete_progress = 1; +	qlt_remove_target(ha, vha); +  	fc_remove_host(vha->host);  	scsi_remove_host(vha->host); @@ -2162,6 +2310,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)  	else if (IS_QLAFX00(ha))  		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |  		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; +	else if (IS_QLA27XX(ha)) +		speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT | +		    FC_PORTSPEED_8GBIT;  	else  		speed = FC_PORTSPEED_1GBIT;  	fc_host_supported_speeds(vha->host) = speed; diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index aa57bf0af57..524f9eb7fcd 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2012 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
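The snprintf()-to-scnprintf() conversions above are not cosmetic: snprintf() returns the length the output would have had, which can exceed the buffer size on truncation, while scnprintf() returns the bytes actually stored, which is what a sysfs show handler must report. A small illustration (kernel context assumed; the values in the comments follow from those return-value semantics):

static void scnprintf_vs_snprintf(void)
{
	char buf[8];
	int a, b;

	a = snprintf(buf, sizeof(buf), "0123456789");	/* a == 10, output truncated */
	b = scnprintf(buf, sizeof(buf), "0123456789");	/* b == 7, bytes stored minus NUL */
	(void)a;
	(void)b;
}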
*/ @@ -1437,9 +1437,12 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)  	if (ha->flags.nic_core_reset_hdlr_active)  		return -EBUSY; +	mutex_lock(&ha->optrom_mutex);  	rval = qla2x00_optrom_setup(bsg_job, vha, 0); -	if (rval) +	if (rval) { +		mutex_unlock(&ha->optrom_mutex);  		return rval; +	}  	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,  	    ha->optrom_region_start, ha->optrom_region_size); @@ -1453,6 +1456,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)  	vfree(ha->optrom_buffer);  	ha->optrom_buffer = NULL;  	ha->optrom_state = QLA_SWAITING; +	mutex_unlock(&ha->optrom_mutex);  	bsg_job->job_done(bsg_job);  	return rval;  } @@ -1465,9 +1469,12 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)  	struct qla_hw_data *ha = vha->hw;  	int rval = 0; +	mutex_lock(&ha->optrom_mutex);  	rval = qla2x00_optrom_setup(bsg_job, vha, 1); -	if (rval) +	if (rval) { +		mutex_unlock(&ha->optrom_mutex);  		return rval; +	}  	/* Set the isp82xx_no_md_cap not to capture minidump */  	ha->flags.isp82xx_no_md_cap = 1; @@ -1483,6 +1490,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)  	vfree(ha->optrom_buffer);  	ha->optrom_buffer = NULL;  	ha->optrom_state = QLA_SWAITING; +	mutex_unlock(&ha->optrom_mutex);  	bsg_job->job_done(bsg_job);  	return rval;  } @@ -2022,6 +2030,86 @@ done:  }  static int +qla26xx_serdes_op(struct fc_bsg_job *bsg_job) +{ +	struct Scsi_Host *host = bsg_job->shost; +	scsi_qla_host_t *vha = shost_priv(host); +	int rval = 0; +	struct qla_serdes_reg sr; + +	memset(&sr, 0, sizeof(sr)); + +	sg_copy_to_buffer(bsg_job->request_payload.sg_list, +	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); + +	switch (sr.cmd) { +	case INT_SC_SERDES_WRITE_REG: +		rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val); +		bsg_job->reply->reply_payload_rcv_len = 0; +		break; +	case INT_SC_SERDES_READ_REG: +		rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val); +		sg_copy_from_buffer(bsg_job->reply_payload.sg_list, +		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); +		bsg_job->reply->reply_payload_rcv_len = sizeof(sr); +		break; +	default: +		ql_dbg(ql_dbg_user, vha, 0x708c, +		    "Unknown serdes cmd %x.\n", sr.cmd); +		rval = -EINVAL; +		break; +	} + +	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = +	    rval ? EXT_STATUS_MAILBOX : 0; + +	bsg_job->reply_len = sizeof(struct fc_bsg_reply); +	bsg_job->reply->result = DID_OK << 16; +	bsg_job->job_done(bsg_job); +	return 0; +} + +static int +qla8044_serdes_op(struct fc_bsg_job *bsg_job) +{ +	struct Scsi_Host *host = bsg_job->shost; +	scsi_qla_host_t *vha = shost_priv(host); +	int rval = 0; +	struct qla_serdes_reg_ex sr; + +	memset(&sr, 0, sizeof(sr)); + +	sg_copy_to_buffer(bsg_job->request_payload.sg_list, +	    bsg_job->request_payload.sg_cnt, &sr, sizeof(sr)); + +	switch (sr.cmd) { +	case INT_SC_SERDES_WRITE_REG: +		rval = qla8044_write_serdes_word(vha, sr.addr, sr.val); +		bsg_job->reply->reply_payload_rcv_len = 0; +		break; +	case INT_SC_SERDES_READ_REG: +		rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val); +		sg_copy_from_buffer(bsg_job->reply_payload.sg_list, +		    bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr)); +		bsg_job->reply->reply_payload_rcv_len = sizeof(sr); +		break; +	default: +		ql_dbg(ql_dbg_user, vha, 0x70cf, +		    "Unknown serdes cmd %x.\n", sr.cmd); +		rval = -EINVAL; +		break; +	} + +	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = +	    rval ? 
EXT_STATUS_MAILBOX : 0; + +	bsg_job->reply_len = sizeof(struct fc_bsg_reply); +	bsg_job->reply->result = DID_OK << 16; +	bsg_job->job_done(bsg_job); +	return 0; +} + +static int  qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)  {  	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { @@ -2069,6 +2157,13 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)  	case QL_VND_FX00_MGMT_CMD:  		return qlafx00_mgmt_cmd(bsg_job); + +	case QL_VND_SERDES_OP: +		return qla26xx_serdes_op(bsg_job); + +	case QL_VND_SERDES_OP_EX: +		return qla8044_serdes_op(bsg_job); +  	default:  		return -ENOSYS;  	} diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 04f770332c2..d38f9efa56f 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -23,6 +23,8 @@  #define QL_VND_WRITE_I2C	0x10  #define QL_VND_READ_I2C		0x11  #define QL_VND_FX00_MGMT_CMD	0x12 +#define QL_VND_SERDES_OP	0x13 +#define	QL_VND_SERDES_OP_EX	0x14  /* BSG Vendor specific subcode returns */  #define EXT_STATUS_OK			0 @@ -212,4 +214,22 @@ struct qla_i2c_access {  	uint8_t  buffer[0x40];  } __packed; +/* 26xx serdes register interface */ + +/* serdes reg commands */ +#define INT_SC_SERDES_READ_REG		1 +#define INT_SC_SERDES_WRITE_REG		2 + +struct qla_serdes_reg { +	uint16_t cmd; +	uint16_t addr; +	uint16_t val; +} __packed; + +struct qla_serdes_reg_ex { +	uint16_t cmd; +	uint32_t addr; +	uint32_t val; +} __packed; +  #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 2ef497ebadc..c72ee97bf3f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
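Both serdes BSG handlers above follow the same shape: copy a struct qla_serdes_reg (or qla_serdes_reg_ex) out of the request payload, dispatch on sr.cmd, and for reads copy the struct back with sr.val filled in. A hedged sketch of building the read request on the submitting side (build_serdes_read is a hypothetical helper, not part of the driver):

static void build_serdes_read(struct qla_serdes_reg *sr, uint16_t addr)
{
	memset(sr, 0, sizeof(*sr));
	sr->cmd = INT_SC_SERDES_READ_REG;	/* read, not write */
	sr->addr = addr;			/* serdes register of interest */
	/* sr->val is filled in by the driver and returned in the reply payload */
}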
*/ @@ -11,44 +11,50 @@   * ----------------------------------------------------------------------   * |             Level            |   Last Value Used  |     Holes	|   * ---------------------------------------------------------------------- - * | Module Init and Probe        |       0x0159       | 0x4b,0xba,0xfa | - * | Mailbox commands             |       0x1181       | 0x111a-0x111b  | - * |                              |                    | 0x1155-0x1158  | - * |                              |                    | 0x1018-0x1019  | + * | Module Init and Probe        |       0x017d       | 0x004b,0x0141	| + * |                              |                    | 0x0144,0x0146	| + * |                              |                    | 0x015b-0x0160	| + * |                              |                    | 0x016e-0x0170	| + * | Mailbox commands             |       0x118d       | 0x1018-0x1019	| + * |                              |                    | 0x10ca         |   * |                              |                    | 0x1115-0x1116  | - * |                              |                    | 0x10ca		| + * |                              |                    | 0x111a-0x111b	| + * |                              |                    | 0x1155-0x1158  |   * | Device Discovery             |       0x2095       | 0x2020-0x2022, |   * |                              |                    | 0x2011-0x2012, |   * |                              |                    | 0x2016         | - * | Queue Command and IO tracing |       0x3058       | 0x3006-0x300b  | + * | Queue Command and IO tracing |       0x3059       | 0x3006-0x300b  |   * |                              |                    | 0x3027-0x3028  |   * |                              |                    | 0x303d-0x3041  |   * |                              |                    | 0x302d,0x3033  |   * |                              |                    | 0x3036,0x3038  |   * |                              |                    | 0x303a		| - * | DPC Thread                   |       0x4022       | 0x4002,0x4013  | + * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |   * | Async Events                 |       0x5087       | 0x502b-0x502f  |   * |                              |                    | 0x5047,0x5052  |   * |                              |                    | 0x5084,0x5075	|   * |                              |                    | 0x503d,0x5044  | + * |                              |                    | 0x507b		|   * | Timer Routines               |       0x6012       |                | - * | User Space Interactions      |       0x70e1       | 0x7018,0x702e, | - * |                              |                    | 0x7020,0x7024, | - * |                              |                    | 0x7039,0x7045, | - * |                              |                    | 0x7073-0x7075, | - * |                              |                    | 0x707b,0x708c, | - * |                              |                    | 0x70a5,0x70a6, | - * |                              |                    | 0x70a8,0x70ab, | - * |                              |                    | 0x70ad-0x70ae, | - * |                              |                    | 0x70d1-0x70db, | - * |                              |                    | 0x7047,0x703b	| - * |                              |                    | 0x70de-0x70df, | - * | Task Management              |       0x803d       | 0x8025-0x8026  | - * |                              |     
               | 0x800b,0x8039  | + * | User Space Interactions      |       0x70e2       | 0x7018,0x702e  | + * |				  |		       | 0x7020,0x7024  | + * |                              |                    | 0x7039,0x7045  | + * |                              |                    | 0x7073-0x7075  | + * |                              |                    | 0x70a5-0x70a6  | + * |                              |                    | 0x70a8,0x70ab  | + * |                              |                    | 0x70ad-0x70ae  | + * |                              |                    | 0x70d7-0x70db  | + * |                              |                    | 0x70de-0x70df  | + * | Task Management              |       0x803d       | 0x8000,0x800b  | + * |                              |                    | 0x8019         | + * |                              |                    | 0x8025,0x8026  | + * |                              |                    | 0x8031,0x8032  | + * |                              |                    | 0x8039,0x803c  |   * | AER/EEH                      |       0x9011       |		|   * | Virtual Port                 |       0xa007       |		| - * | ISP82XX Specific             |       0xb14c       | 0xb002,0xb024  | + * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |   * |                              |                    | 0xb09e,0xb0ae  | + * |				  |		       | 0xb0c3,0xb0c6  |   * |                              |                    | 0xb0e0-0xb0ef  |   * |                              |                    | 0xb085,0xb0dc  |   * |                              |                    | 0xb107,0xb108  | @@ -58,8 +64,12 @@   * |                              |                    | 0xb13c-0xb140  |   * |                              |                    | 0xb149		|   * | MultiQ                       |       0xc00c       |		| - * | Misc                         |       0xd010       |		| - * | Target Mode		  |	  0xe070       | 0xe021		| + * | Misc                         |       0xd212       | 0xd017-0xd019	| + * |                              |                    | 0xd020		| + * |                              |                    | 0xd030-0xd0ff	| + * |                              |                    | 0xd101-0xd1fe	| + * |                              |                    | 0xd213-0xd2fe	| + * | Target Mode		  |	  0xe078       |		|   * | Target Mode Management	  |	  0xf072       | 0xf002-0xf003	|   * |                              |                    | 0xf046-0xf049  |   * | Target Mode Task Management  |	  0x1000b      |		| @@ -103,7 +113,87 @@ qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)  	return ptr + (rsp->length * sizeof(response_t));  } -static int +int +qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, +	uint32_t ram_dwords, void **nxt) +{ +	int rval; +	uint32_t cnt, stat, timer, dwords, idx; +	uint16_t mb0, mb1; +	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; +	dma_addr_t dump_dma = ha->gid_list_dma; +	uint32_t *dump = (uint32_t *)ha->gid_list; + +	rval = QLA_SUCCESS; +	mb0 = 0; + +	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM); +	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + +	dwords = qla2x00_gid_list_size(ha) / 4; +	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; +	    cnt += dwords, addr += dwords) { +		if (cnt + dwords > ram_dwords) +			dwords = ram_dwords - cnt; + +		WRT_REG_WORD(&reg->mailbox1, LSW(addr)); +		WRT_REG_WORD(&reg->mailbox8, MSW(addr)); + +		WRT_REG_WORD(&reg->mailbox2,
MSW(dump_dma)); +		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma)); +		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma))); +		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma))); + +		WRT_REG_WORD(&reg->mailbox4, MSW(dwords)); +		WRT_REG_WORD(&reg->mailbox5, LSW(dwords)); + +		WRT_REG_WORD(&reg->mailbox9, 0); +		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); + +		ha->flags.mbox_int = 0; +		for (timer = 6000000; timer; timer--) { +			/* Check for pending interrupts. */ +			stat = RD_REG_DWORD(&reg->host_status); +			if (stat & HSRX_RISC_INT) { +				stat &= 0xff; + +				if (stat == 0x1 || stat == 0x2 || +				    stat == 0x10 || stat == 0x11) { +					set_bit(MBX_INTERRUPT, +					    &ha->mbx_cmd_flags); + +					mb0 = RD_REG_WORD(&reg->mailbox0); +					mb1 = RD_REG_WORD(&reg->mailbox1); + +					WRT_REG_DWORD(&reg->hccr, +					    HCCRX_CLR_RISC_INT); +					RD_REG_DWORD(&reg->hccr); +					break; +				} + +				/* Clear this intr; it wasn't a mailbox intr */ +				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); +				RD_REG_DWORD(&reg->hccr); +			} +			udelay(5); +		} +		ha->flags.mbox_int = 1; + +		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { +			rval = mb0 & MBS_MASK; +			for (idx = 0; idx < dwords; idx++) +				ram[cnt + idx] = IS_QLA27XX(ha) ? +				    le32_to_cpu(dump[idx]) : swab32(dump[idx]); +		} else { +			rval = QLA_FUNCTION_FAILED; +		} +	} + +	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL; +	return rval; +} + +int  qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,      uint32_t ram_dwords, void **nxt)  { @@ -138,6 +228,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,  		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));  		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT); +	ha->flags.mbox_int = 0;  	for (timer = 6000000; timer; timer--) {  		/* Check for pending interrupts. */  		stat = RD_REG_DWORD(&reg->host_status); @@ -163,11 +254,13 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,  			}  			udelay(5);  		} +	ha->flags.mbox_int = 1;  		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {  			rval = mb0 & MBS_MASK;  			for (idx = 0; idx < dwords; idx++) -				ram[cnt + idx] = swab32(dump[idx]); +				ram[cnt + idx] = IS_QLA27XX(ha) ? +				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);  		} else {  			rval = QLA_FUNCTION_FAILED;  		} @@ -188,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,  	if (rval != QLA_SUCCESS)  		return rval; +	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags); +  	/* External Memory.
*/ -	return qla24xx_dump_ram(ha, 0x100000, *nxt,  	    ha->fw_memory_size - 0x100000 + 1, nxt); +	rval = qla24xx_dump_ram(ha, 0x100000, *nxt, +	    ha->fw_memory_size - 0x100000 + 1, nxt); +	if (rval == QLA_SUCCESS) +		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags); + +	return rval;  }  static uint32_t * @@ -207,34 +306,30 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,  	return buf;  } -static inline int -qla24xx_pause_risc(struct device_reg_24xx __iomem *reg) +void +qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)  { -	int rval = QLA_SUCCESS; -	uint32_t cnt; -  	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE); -	for (cnt = 30000; -	    ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) && -	    rval == QLA_SUCCESS; cnt--) { -		if (cnt) -			udelay(100); -		else -			rval = QLA_FUNCTION_TIMEOUT; -	} -	return rval; +	/* 100 usec delay is sufficient for hardware to pause RISC */ +	udelay(100); +	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) +		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);  } -static int +int  qla24xx_soft_reset(struct qla_hw_data *ha)  {  	int rval = QLA_SUCCESS;  	uint32_t cnt; -	uint16_t mb0, wd; +	uint16_t wd;  	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; -	/* Reset RISC. */ +	/* +	 * Reset RISC. The delay is dependent on system architecture. +	 * Driver can proceed with the reset sequence after waiting +	 * for a timeout period. +	 */  	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);  	for (cnt = 0; cnt < 30000; cnt++) {  		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0) @@ -242,19 +337,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)  		udelay(10);  	} +	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE)) +		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);  	WRT_REG_DWORD(&reg->ctrl_status,  	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);  	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);  	udelay(100); -	/* Wait for firmware to complete NVRAM accesses. */ -	mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0); -	for (cnt = 10000 ; cnt && mb0; cnt--) { -		udelay(5); -		mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0); -		barrier(); -	}  	/* Wait for soft-reset to complete. */  	for (cnt = 0; cnt < 30000; cnt++) { @@ -264,16 +354,21 @@ qla24xx_soft_reset(struct qla_hw_data *ha)  		udelay(10);  	} +	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET)) +		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); +  	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);  	RD_REG_DWORD(&reg->hccr);             /* PCI Posting.
*/ -	for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 && +	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&  	    rval == QLA_SUCCESS; cnt--) {  		if (cnt) -			udelay(100); +			udelay(10);  		else  			rval = QLA_FUNCTION_TIMEOUT;  	} +	if (rval == QLA_SUCCESS) +		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);  	return rval;  } @@ -536,7 +631,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)  	struct qla2xxx_mq_chain *mq = ptr;  	device_reg_t __iomem *reg; -	if (!ha->mqenable || IS_QLA83XX(ha)) +	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))  		return ptr;  	mq = ptr; @@ -570,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)  	if (rval != QLA_SUCCESS) {  		ql_log(ql_log_warn, vha, 0xd000, -		    "Failed to dump firmware (%x).\n", rval); +		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n", +		    rval, ha->fw_dump_cap_flags);  		ha->fw_dumped = 0;  	} else {  		ql_log(ql_log_info, vha, 0xd001, -		    "Firmware dump saved to temp buffer (%ld/%p).\n", -		    vha->host_no, ha->fw_dump); +		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", +		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);  		ha->fw_dumped = 1;  		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);  	} @@ -964,6 +1060,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	risc_address = ext_mem_cnt = 0;  	flags = 0; +	ha->fw_dump_cap_flags = 0;  	if (!hardware_locked)  		spin_lock_irqsave(&ha->hardware_lock, flags); @@ -986,10 +1083,11 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); -	/* Pause RISC. */ -	rval = qla24xx_pause_risc(reg); -	if (rval != QLA_SUCCESS) -		goto qla24xx_fw_dump_failed_0; +	/* +	 * Pause RISC. No need to track timeout, as resetting the chip +	 * is the right approach in case of pause timeout +	 */ +	qla24xx_pause_risc(reg, ha);  	/* Host interface registers. */  	dmp_reg = &reg->flash_addr; @@ -1213,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	risc_address = ext_mem_cnt = 0;  	flags = 0; +	ha->fw_dump_cap_flags = 0;  	if (!hardware_locked)  		spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1236,10 +1335,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); -	/* Pause RISC. */ -	rval = qla24xx_pause_risc(reg); -	if (rval != QLA_SUCCESS) -		goto qla25xx_fw_dump_failed_0; +	/* +	 * Pause RISC. No need to track timeout, as resetting the chip +	 * is the right approach in case of pause timeout +	 */ +	qla24xx_pause_risc(reg, ha);  	/* Host/Risc registers. */  	iter_reg = fw->host_risc_reg; @@ -1530,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	risc_address = ext_mem_cnt = 0;  	flags = 0; +	ha->fw_dump_cap_flags = 0;  	if (!hardware_locked)  		spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1552,10 +1653,11 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); -	/* Pause RISC. */ -	rval = qla24xx_pause_risc(reg); -	if (rval != QLA_SUCCESS) -		goto qla81xx_fw_dump_failed_0; +	/* +	 * Pause RISC. No need to track timeout, as resetting the chip +	 * is the right approach in case of pause timeout +	 */ +	qla24xx_pause_risc(reg, ha);  	/* Host/Risc registers.
*/  	iter_reg = fw->host_risc_reg; @@ -1849,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	risc_address = ext_mem_cnt = 0;  	flags = 0; +	ha->fw_dump_cap_flags = 0;  	if (!hardware_locked)  		spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1870,10 +1973,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); -	/* Pause RISC. */ -	rval = qla24xx_pause_risc(reg); -	if (rval != QLA_SUCCESS) -		goto qla83xx_fw_dump_failed_0; +	/* +	 * Pause RISC. No need to track timeout, as resetting the chip +	 * is the right approach in case of pause timeout +	 */ +	qla24xx_pause_risc(reg, ha);  	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);  	dmp_reg = &reg->iobase_window; @@ -2296,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)  			nxt += sizeof(fw->code_ram);  			nxt += (ha->fw_memory_size - 0x100000 + 1);  			goto copy_queue; -		} else +		} else { +			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);  			ql_log(ql_log_warn, vha, 0xd010,  			    "bigger hammer success?\n"); +		}  	}  	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram), diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 35e20b4f8b6..e1fc4e66966 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -348,3 +348,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);  #define ql_dbg_tgt	0x00004000 /* Target mode */  #define ql_dbg_tgt_mgt	0x00002000 /* Target mode management */  #define ql_dbg_tgt_tmr	0x00001000 /* Target mode task management */ + +extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, +	uint32_t, void **); +extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, +	uint32_t, void **); +extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, +	struct qla_hw_data *); +extern int qla24xx_soft_reset(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 93db74ef346..de5d0ae19d8 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -654,7 +654,7 @@ typedef union {  		struct device_reg_25xxmq isp25mq;  		struct device_reg_82xx isp82;  		struct device_reg_fx00 ispfx00; -} device_reg_t; +} __iomem device_reg_t;  #define ISP_REQ_Q_IN(ha, reg) \  	(IS_QLA2100(ha) || IS_QLA2200(ha) ? \ @@ -808,7 +808,7 @@ struct mbx_cmd_32 {  					   Notification */  #define MBA_FW_POLL_STATE	0x8600  /* Firmware in poll diagnostic state */  #define MBA_FW_RESET_FCT	0x8502	/* Firmware reset factory defaults */ - +#define MBA_FW_INIT_INPROGRESS	0x8500	/* Firmware boot in progress */  /* 83XX FCoE specific */  #define MBA_IDC_AEN		0x8200  /* FCoE: NIC Core state change AEN */ @@ -862,7 +862,6 @@ struct mbx_cmd_32 {   */  #define MBC_LOAD_RAM			1	/* Load RAM. */  #define MBC_EXECUTE_FIRMWARE		2	/* Execute firmware. */ -#define MBC_WRITE_RAM_WORD		4	/* Write RAM word. */  #define MBC_READ_RAM_WORD		5	/* Read RAM word.
*/  #define MBC_MAILBOX_REGISTER_TEST	6	/* Wrap incoming mailboxes */  #define MBC_VERIFY_CHECKSUM		7	/* Verify checksum. */ @@ -937,6 +936,9 @@ struct mbx_cmd_32 {  /*   * ISP24xx mailbox commands   */ +#define MBC_WRITE_SERDES		0x3	/* Write serdes word. */ +#define MBC_READ_SERDES			0x4	/* Read serdes word. */ +#define MBC_LOAD_DUMP_MPI_RAM		0x5	/* Load/Dump MPI RAM. */  #define MBC_SERDES_PARAMS		0x10	/* Serdes Tx Parameters. */  #define MBC_GET_IOCB_STATUS		0x12	/* Get IOCB status command. */  #define MBC_PORT_PARAMS			0x1A	/* Port iDMA Parameters. */ @@ -963,6 +965,13 @@ struct mbx_cmd_32 {   */  #define MBC_WRITE_MPI_REGISTER		0x01    /* Write MPI Register. */ +/* + * ISP8044 mailbox commands + */ +#define MBC_SET_GET_ETH_SERDES_REG	0x150 +#define HCS_WRITE_SERDES		0x3 +#define HCS_READ_SERDES			0x4 +  /* Firmware return data sizes */  #define FCAL_MAP_SIZE	128 @@ -1196,30 +1205,6 @@ typedef struct {  	uint8_t  reserved_3[26];  } init_cb_t; - -struct init_cb_fx { -	uint16_t	version; -	uint16_t	reserved_1[13]; -	__le16		request_q_outpointer; -	__le16		response_q_inpointer; -	uint16_t	reserved_2[2]; -	__le16		response_q_length; -	__le16		request_q_length; -	uint16_t	reserved_3[2]; -	__le32		request_q_address[2]; -	__le32		response_q_address[2]; -	uint16_t	reserved_4[4]; -	uint8_t		response_q_msivec; -	uint8_t		reserved_5[19]; -	uint16_t	interrupt_delay_timer; -	uint16_t	reserved_6; -	uint32_t	fwoptions1; -	uint32_t	fwoptions2; -	uint32_t	fwoptions3; -	uint8_t		reserved_7[24]; -}; - -  /*   * Get Link Status mailbox command return buffer.   */ @@ -1644,25 +1629,35 @@ typedef struct {  #define PO_MODE_DIF_PASS	2  #define PO_MODE_DIF_REPLACE	3  #define PO_MODE_DIF_TCP_CKSUM	6 -#define PO_ENABLE_DIF_BUNDLING	BIT_8  #define PO_ENABLE_INCR_GUARD_SEED	BIT_3 -#define PO_DISABLE_INCR_REF_TAG	BIT_5  #define PO_DISABLE_GUARD_CHECK	BIT_4 +#define PO_DISABLE_INCR_REF_TAG	BIT_5 +#define PO_DIS_HEADER_MODE	BIT_7 +#define PO_ENABLE_DIF_BUNDLING	BIT_8 +#define PO_DIS_FRAME_MODE	BIT_9 +#define PO_DIS_VALD_APP_ESC	BIT_10 /* Dis validation for escape tag/ffffh */ +#define PO_DIS_VALD_APP_REF_ESC BIT_11 + +#define PO_DIS_APP_TAG_REPL	BIT_12 /* disable APP Tag replacement */ +#define PO_DIS_REF_TAG_REPL	BIT_13 +#define PO_DIS_APP_TAG_VALD	BIT_14 /* disable APP Tag validation */ +#define PO_DIS_REF_TAG_VALD	BIT_15 +  /*   * ISP queue - 64-Bit addressing, continuation crc entry structure definition.   */  struct crc_context {  	uint32_t handle;		/* System handle.
*/ -	uint32_t ref_tag; -	uint16_t app_tag; +	__le32 ref_tag; +	__le16 app_tag;  	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/  	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/ -	uint16_t guard_seed;		/* Initial Guard Seed */ -	uint16_t prot_opts;		/* Requested Data Protection Mode */ -	uint16_t blk_size;		/* Data size in bytes */ +	__le16 guard_seed;		/* Initial Guard Seed */ +	__le16 prot_opts;		/* Requested Data Protection Mode */ +	__le16 blk_size;		/* Data size in bytes */  	uint16_t runt_blk_guard;	/* Guard value for runt block (tape  					 * only) */ -	uint32_t byte_count;		/* Total byte count/ total data +	__le32 byte_count;		/* Total byte count/ total data  					 * transfer count */  	union {  		struct { @@ -1676,10 +1671,10 @@ struct crc_context {  			uint32_t	reserved_6;  		} nobundling;  		struct { -			uint32_t	dif_byte_count;	/* Total DIF byte +			__le32	dif_byte_count;	/* Total DIF byte  							 * count */  			uint16_t	reserved_1; -			uint16_t	dseg_count;	/* Data segment count */ +			__le16	dseg_count;	/* Data segment count */  			uint32_t	reserved_2;  			uint32_t	data_address[2];  			uint32_t	data_length; @@ -1770,6 +1765,8 @@ typedef struct {  #define CS_PORT_CONFIG_CHG	0x2A	/* Port Configuration Changed */  #define CS_PORT_BUSY		0x2B	/* Port Busy */  #define CS_COMPLETE_CHKCOND	0x30	/* Error? */ +#define CS_IOCB_ERROR		0x31	/* Generic error for IOCB request +					   failure */  #define CS_BAD_PAYLOAD		0x80	/* Driver defined */  #define CS_UNKNOWN		0x81	/* Driver defined */  #define CS_RETRY		0x82	/* Driver defined */ @@ -2171,6 +2168,7 @@ struct ct_fdmi_hba_attributes {  #define FDMI_PORT_SPEED_4GB		0x8  #define FDMI_PORT_SPEED_8GB		0x10  #define FDMI_PORT_SPEED_16GB		0x20 +#define FDMI_PORT_SPEED_32GB		0x40  #define FDMI_PORT_SPEED_UNKNOWN		0x8000  struct ct_fdmi_port_attr { @@ -2679,7 +2677,7 @@ struct bidi_statistics {  #define QLA_MQ_SIZE 32  #define QLA_MAX_QUEUES 256  #define ISP_QUE_REG(ha, id) \ -	((ha->mqenable || IS_QLA83XX(ha)) ? \ +	((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? 
\  	 ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\  	 ((void __iomem *)ha->iobase))  #define QLA_REQ_QUE_ID(tag) \ @@ -2697,6 +2695,7 @@ struct rsp_que {  	uint32_t __iomem *rsp_q_out;  	uint16_t  ring_index;  	uint16_t  out_ptr; +	uint16_t  *in_ptr;		/* queue shadow in index */  	uint16_t  length;  	uint16_t  options;  	uint16_t  rid; @@ -2723,6 +2722,7 @@ struct req_que {  	uint32_t __iomem *req_q_out;  	uint16_t  ring_index;  	uint16_t  in_ptr; +	uint16_t  *out_ptr;		/* queue shadow out index */  	uint16_t  cnt;  	uint16_t  length;  	uint16_t  options; @@ -2734,7 +2734,6 @@ struct req_que {  	srb_t **outstanding_cmds;  	uint32_t current_outstanding_cmd;  	uint16_t num_outstanding_cmds; -#define	MAX_Q_DEPTH		32  	int max_q_depth;  	dma_addr_t  dma_fx00; @@ -2750,6 +2749,13 @@ struct qlfc_fw {  	uint32_t len;  }; +struct scsi_qlt_host { +	void *target_lport_ptr; +	struct mutex tgt_mutex; +	struct mutex tgt_host_action_mutex; +	struct qla_tgt *qla_tgt; +}; +  struct qlt_hw_data {  	/* Protected by hw lock */  	uint32_t enable_class_2:1; @@ -2765,15 +2771,11 @@ struct qlt_hw_data {  	uint32_t __iomem *atio_q_in;  	uint32_t __iomem *atio_q_out; -	void *target_lport_ptr;  	struct qla_tgt_func_tmpl *tgt_ops; -	struct qla_tgt *qla_tgt;  	struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];  	uint16_t current_handle;  	struct qla_tgt_vp_map *tgt_vp_map; -	struct mutex tgt_mutex; -	struct mutex tgt_host_action_mutex;  	int saved_set;  	uint16_t saved_exchange_count; @@ -2815,7 +2817,6 @@ struct qla_hw_data {  		uint32_t	fac_supported		:1;  		uint32_t	chip_reset_done		:1; -		uint32_t	port0			:1;  		uint32_t	running_gold_fw		:1;  		uint32_t	eeh_busy		:1;  		uint32_t	cpu_affinity_enabled	:1; @@ -2846,7 +2847,7 @@ struct qla_hw_data {  	spinlock_t	hardware_lock ____cacheline_aligned;  	int		bars;  	int		mem_only; -	device_reg_t __iomem *iobase;           /* Base I/O address */ +	device_reg_t *iobase;           /* Base I/O address */  	resource_size_t pio_address;  #define MIN_IOBASE_LEN          0x100 @@ -2865,8 +2866,8 @@ struct qla_hw_data {  	uint32_t		rsp_que_off;  	/* Multi queue data structs */ -	device_reg_t __iomem *mqiobase; -	device_reg_t __iomem *msixbase; +	device_reg_t *mqiobase; +	device_reg_t *msixbase;  	uint16_t        msix_count;  	uint8_t         mqenable;  	struct req_que **req_q_map; @@ -2902,6 +2903,7 @@ struct qla_hw_data {  #define PORT_SPEED_4GB  0x03  #define PORT_SPEED_8GB  0x04  #define PORT_SPEED_16GB 0x05 +#define PORT_SPEED_32GB 0x06  #define PORT_SPEED_10GB	0x13  	uint16_t	link_data_rate;         /* F/W operating speed */ @@ -2925,6 +2927,9 @@ struct qla_hw_data {  #define PCI_DEVICE_ID_QLOGIC_ISP8001	0x8001  #define PCI_DEVICE_ID_QLOGIC_ISP8031	0x8031  #define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031 +#define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071 +#define PCI_DEVICE_ID_QLOGIC_ISP2271	0x2271 +  	uint32_t	device_type;  #define DT_ISP2100                      BIT_0  #define DT_ISP2200                      BIT_1 @@ -2945,7 +2950,9 @@ struct qla_hw_data {  #define DT_ISP8031			BIT_16  #define DT_ISPFX00			BIT_17  #define DT_ISP8044			BIT_18 -#define DT_ISP_LAST			(DT_ISP8044 << 1) +#define DT_ISP2071			BIT_19 +#define DT_ISP2271			BIT_20 +#define DT_ISP_LAST			(DT_ISP2271 << 1)  #define DT_T10_PI                       BIT_25  #define DT_IIDMA                        BIT_26 @@ -2975,6 +2982,8 @@ struct qla_hw_data {  #define IS_QLA2031(ha)	(DT_MASK(ha) & DT_ISP2031)  #define IS_QLA8031(ha)	(DT_MASK(ha) & DT_ISP8031)  #define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00) 
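/*
 * Illustrative only: the IS_QLA2071/IS_QLA2271 tests added just below
 * follow the existing DT_ bit scheme, where DT_MASK() isolates the
 * device-type bits, so IS_QLA27XX(ha) reduces to a plain bit test:
 *
 *	if (DT_MASK(ha) & (DT_ISP2071 | DT_ISP2271))
 *		;	/. ISP27xx-only handling goes here ./
 *
 * which is equivalent to IS_QLA2071(ha) || IS_QLA2271(ha).
 */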
+#define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071) +#define IS_QLA2271(ha)	(DT_MASK(ha) & DT_ISP2271)  #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \  			IS_QLA6312(ha) || IS_QLA6322(ha)) @@ -2983,6 +2992,7 @@ struct qla_hw_data {  #define IS_QLA25XX(ha)  (IS_QLA2532(ha))  #define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))  #define IS_QLA84XX(ha)  (IS_QLA8432(ha)) +#define IS_QLA27XX(ha)  (IS_QLA2071(ha) || IS_QLA2271(ha))  #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \  				IS_QLA84XX(ha))  #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ @@ -2991,12 +3001,13 @@ struct qla_hw_data {  #define IS_QLA2XXX_MIDTYPE(ha)	(IS_QLA24XX(ha) || IS_QLA84XX(ha) || \  				IS_QLA25XX(ha) || IS_QLA81XX(ha) || \  				IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ -				IS_QLA8044(ha)) +				IS_QLA8044(ha) || IS_QLA27XX(ha))  #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha)) -#define IS_NOPOLLING_TYPE(ha)	((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ -			IS_QLA83XX(ha)) && (ha)->flags.msix_enabled) -#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha)) -#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha)) +#define IS_NOPOLLING_TYPE(ha)	(IS_QLA81XX(ha) && (ha)->flags.msix_enabled) +#define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ +				IS_QLA27XX(ha)) +#define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ +				IS_QLA27XX(ha))  #define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))  #define IS_T10_PI_CAPABLE(ha)   ((ha)->device_type & DT_T10_PI) @@ -3006,7 +3017,8 @@ struct qla_hw_data {  #define IS_OEM_001(ha)          ((ha)->device_type & DT_OEM_001)  #define HAS_EXTENDED_IDS(ha)    ((ha)->device_type & DT_EXTENDED_IDS)  #define IS_CT6_SUPPORTED(ha)	((ha)->device_type & DT_CT6_SUPPORTED) -#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha)) +#define IS_MQUE_CAPABLE(ha)	((ha)->mqenable || IS_QLA83XX(ha) || \ +				IS_QLA27XX(ha))  #define IS_BIDI_CAPABLE(ha)	((IS_QLA25XX(ha) || IS_QLA2031(ha)))  /* Bit 21 of fw_attributes decides the MCTP capabilities */  #define IS_MCTP_CAPABLE(ha)	(IS_QLA2031(ha) && \ @@ -3019,6 +3031,7 @@ struct qla_hw_data {      (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))  #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))  #define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length) +#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))  	/* HBA serial number */  	uint8_t		serial0; @@ -3131,6 +3144,9 @@ struct qla_hw_data {  	uint16_t	fw_xcb_count;  	uint16_t	fw_iocb_count; +	uint32_t	fw_shared_ram_start; +	uint32_t	fw_shared_ram_end; +  	uint16_t	fw_options[16];         /* slots: 1,2,3,10,11 */  	uint8_t		fw_seriallink_options[4];  	uint16_t	fw_seriallink_options24[4]; @@ -3139,11 +3155,22 @@ struct qla_hw_data {  	uint32_t	mpi_capabilities;  	uint8_t		phy_version[3]; +	/* Firmware dump template */ +	void		*fw_dump_template; +	uint32_t	fw_dump_template_len;  	/* Firmware dump information. 
*/  	struct qla2xxx_fw_dump *fw_dump;  	uint32_t	fw_dump_len;  	int		fw_dumped; +	unsigned long	fw_dump_cap_flags; +#define RISC_PAUSE_CMPL		0 +#define DMA_SHUTDOWN_CMPL	1 +#define ISP_RESET_CMPL		2 +#define RISC_RDY_AFT_RESET	3 +#define RISC_SRAM_DUMP_CMPL	4 +#define RISC_EXT_MEM_DUMP_CMPL	5  	int		fw_dump_reading; +	int		prev_minidump_failed;  	dma_addr_t	eft_dma;  	void		*eft;  /* Current size of mctp dump is 0x086064 bytes */ @@ -3181,6 +3208,7 @@ struct qla_hw_data {  #define QLA_SWRITING	2  	uint32_t	optrom_region_start;  	uint32_t	optrom_region_size; +	struct mutex	optrom_mutex;  /* PCI expansion ROM image information. */  #define ROM_CODE_TYPE_BIOS	0 @@ -3302,16 +3330,12 @@ struct qla_hw_data {  	struct work_struct nic_core_reset;  	struct work_struct idc_state_handler;  	struct work_struct nic_core_unrecoverable; - -#define HOST_QUEUE_RAMPDOWN_INTERVAL           (60 * HZ) -#define HOST_QUEUE_RAMPUP_INTERVAL             (30 * HZ) -	unsigned long   host_last_rampdown_time; -	unsigned long   host_last_rampup_time; -	int             cfg_lun_q_depth; +	struct work_struct board_disable;  	struct mr_data_fx00 mr;  	struct qlt_hw_data tgt; +	int	allow_cna_fw_dump;  };  /* @@ -3372,12 +3396,11 @@ typedef struct scsi_qla_host {  #define MPI_RESET_NEEDED	19	/* Initiate MPI FW reset */  #define ISP_QUIESCE_NEEDED	20	/* Driver need some quiescence */  #define SCR_PENDING		21	/* SCR in target mode */ -#define HOST_RAMP_DOWN_QUEUE_DEPTH     22 -#define HOST_RAMP_UP_QUEUE_DEPTH       23 -#define PORT_UPDATE_NEEDED	24 -#define FX00_RESET_RECOVERY	25 -#define FX00_TARGET_SCAN	26 -#define FX00_CRITEMP_RECOVERY	27 +#define PORT_UPDATE_NEEDED	22 +#define FX00_RESET_RECOVERY	23 +#define FX00_TARGET_SCAN	24 +#define FX00_CRITEMP_RECOVERY	25 +#define FX00_HOST_INFO_RESEND	26  	uint32_t	device_flags;  #define SWITCH_FOUND		BIT_0 @@ -3441,6 +3464,7 @@ typedef struct scsi_qla_host {  #define VP_ERR_FAB_LOGOUT	4  #define VP_ERR_ADAP_NORESOURCES	5  	struct qla_hw_data *hw; +	struct scsi_qlt_host vha_tgt;  	struct req_que *req;  	int		fw_heartbeat_counter;  	int		seconds_since_last_heartbeat; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 792a29294b6..2ca39b8e716 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -114,7 +114,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)  {  	struct qla_hw_data *ha = vha->hw; -	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) +	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && +	    !IS_QLA27XX(ha))  		goto out;  	if (!ha->fce)  		goto out; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 610d3aa905a..eb8f57249f1 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
*/ @@ -371,7 +371,10 @@ struct init_cb_24xx {  	 * BIT 14 = Data Rate bit 1  	 * BIT 15 = Data Rate bit 2  	 * BIT 16 = Enable 75 ohm Termination Select -	 * BIT 17-31 = Reserved +	 * BIT 17-28 = Reserved +	 * BIT 29 = Enable response queue 0 in index shadowing +	 * BIT 30 = Enable request queue 0 out index shadowing +	 * BIT 31 = Reserved  	 */  	uint32_t firmware_options_3;  	uint16_t qos; @@ -1134,13 +1137,6 @@ struct device_reg_24xx {  #define MIN_MULTI_ID_FABRIC	64	/* Must be power-of-2. */  #define MAX_MULTI_ID_FABRIC	256	/* ... */ -#define for_each_mapped_vp_idx(_ha, _idx)		\ -	for (_idx = find_next_bit((_ha)->vp_idx_map,	\ -		(_ha)->max_npiv_vports + 1, 1);		\ -	    _idx <= (_ha)->max_npiv_vports;		\ -	    _idx = find_next_bit((_ha)->vp_idx_map,	\ -		(_ha)->max_npiv_vports + 1, _idx + 1))	\ -  struct mid_conf_entry_24xx {  	uint16_t reserved_1; @@ -1378,6 +1374,10 @@ struct qla_flt_header {  #define FLT_REG_NVRAM_0		0x15  #define FLT_REG_VPD_1		0x16  #define FLT_REG_NVRAM_1		0x17 +#define FLT_REG_VPD_2		0xD4 +#define FLT_REG_NVRAM_2		0xD5 +#define FLT_REG_VPD_3		0xD6 +#define FLT_REG_NVRAM_3		0xD7  #define FLT_REG_FDT		0x1a  #define FLT_REG_FLT		0x1c  #define FLT_REG_HW_EVENT_0	0x1d diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 4446bf5fe29..d48dea8fab1 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -98,7 +98,6 @@ extern int qlport_down_retry;  extern int ql2xplogiabsentdevice;  extern int ql2xloginretrycount;  extern int ql2xfdmienable; -extern int ql2xmaxqdepth;  extern int ql2xallocfwdump;  extern int ql2xextended_error_logging;  extern int ql2xiidmaenable; @@ -160,6 +159,9 @@ extern int qla83xx_clear_drv_presence(scsi_qla_host_t *vha);  extern int __qla83xx_clear_drv_presence(scsi_qla_host_t *vha);  extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); +extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); +extern void qla2x00_disable_board_on_pci_error(struct work_struct *); +  /*   * Global Functions in qla_mid.c source file.   */ @@ -218,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);  extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);  extern int qla2x00_issue_marker(scsi_qla_host_t *, int); +extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, +	uint32_t *, uint16_t, struct qla_tgt_cmd *); +extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, +	uint32_t *, uint16_t, struct qla_tgt_cmd *); +extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, +	uint32_t *, uint16_t, struct qla_tgt_cmd *); +  /*   * Global Function Prototypes in qla_mbx.c source file. 
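Shadow indices, enabled through the firmware_options_3 bits 29-30 described earlier in this hunk series, let the firmware DMA its ring indices into host memory (the new in_ptr/out_ptr members of struct rsp_que and struct req_que), so the driver can poll plain memory instead of issuing an MMIO register read per check. A minimal sketch under that assumption (req_shadow_out_index is a hypothetical helper):

static uint16_t req_shadow_out_index(struct req_que *req)
{
	/* shadow copy kept current by firmware via DMA; no register read */
	return *req->out_ptr;
}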
@@ -328,6 +337,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,      dma_addr_t);  extern int qla24xx_abort_command(srb_t *); +extern int qla24xx_async_abort_command(srb_t *);  extern int  qla24xx_abort_target(struct fc_port *, unsigned int, int);  extern int @@ -339,6 +349,16 @@ extern int  qla2x00_system_error(scsi_qla_host_t *);  extern int +qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t); +extern int +qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *); + +extern int +qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t); +extern int +qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *); + +extern int  qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);  extern int @@ -455,6 +475,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,  extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,  				    uint32_t);  extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); +bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t);  extern int qla2x00_beacon_on(struct scsi_qla_host *);  extern int qla2x00_beacon_off(struct scsi_qla_host *); @@ -503,6 +524,16 @@ extern void qla2300_fw_dump(scsi_qla_host_t *, int);  extern void qla24xx_fw_dump(scsi_qla_host_t *, int);  extern void qla25xx_fw_dump(scsi_qla_host_t *, int);  extern void qla81xx_fw_dump(scsi_qla_host_t *, int); +extern void qla82xx_fw_dump(scsi_qla_host_t *, int); +extern void qla8044_fw_dump(scsi_qla_host_t *, int); + +extern void qla27xx_fwdump(scsi_qla_host_t *, int); +extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *); +extern int qla27xx_fwdt_template_valid(void *); +extern ulong qla27xx_fwdt_template_size(void *); +extern const void *qla27xx_fwdt_template_default(void); +extern ulong qla27xx_fwdt_template_default_size(void); +  extern void qla2x00_dump_regs(scsi_qla_host_t *);  extern void qla2x00_dump_buffer(uint8_t *, uint32_t);  extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); @@ -541,10 +572,9 @@ struct fc_function_template;  extern struct fc_function_template qla2xxx_transport_functions;  extern struct fc_function_template qla2xxx_transport_vport_functions;  extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); -extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *); +extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *, bool);  extern void qla2x00_init_host_attr(scsi_qla_host_t *);  extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *); -extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);  extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16_t *);  extern int qla2x00_echo_test(scsi_qla_host_t *,  	struct msg_echo_lb *, uint16_t *); @@ -587,7 +617,6 @@ extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);  extern irqreturn_t qlafx00_intr_handler(int, void *);  extern void qlafx00_enable_intrs(struct qla_hw_data *);  extern void qlafx00_disable_intrs(struct qla_hw_data *); -extern int qlafx00_abort_command(srb_t *);  extern int qlafx00_abort_target(fc_port_t *, unsigned int, int);  extern int qlafx00_lun_reset(fc_port_t *, unsigned int, int);  extern int qlafx00_start_scsi(srb_t *); @@ -725,7 +754,7 @@ extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);  extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);  extern int qla8044_device_state_handler(struct scsi_qla_host *vha);  extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha); 
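The qla_init.c rework that follows turns the task-management IOCB from fire-and-forget into a synchronous call: the SRB is started, the caller blocks on a completion, and the timeout callback stamps CS_TIMEOUT before waking the waiter. A minimal sketch of the pattern (struct tmf_ctx and both helpers are hypothetical; the completion API is the standard kernel one):

struct tmf_ctx {
	struct completion comp;
	uint16_t comp_status;
};

static void tmf_timeout(struct tmf_ctx *ctx)
{
	ctx->comp_status = CS_TIMEOUT;	/* record why the waiter woke */
	complete(&ctx->comp);
}

static int tmf_issue_and_wait(struct tmf_ctx *ctx)
{
	init_completion(&ctx->comp);
	/* ... start the SRB; its done/timeout callbacks invoke complete() ... */
	wait_for_completion(&ctx->comp);
	return ctx->comp_status == CS_COMPLETE ?
	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
}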
-extern void qla8044_clear_drv_active(struct scsi_qla_host *vha); +extern void qla8044_clear_drv_active(struct qla_hw_data *);  void qla8044_get_minidump(struct scsi_qla_host *vha);  int qla8044_collect_md_data(struct scsi_qla_host *vha);  extern int qla8044_md_get_template(scsi_qla_host_t *); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index cd47f1b32d9..a0df3b1b382 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -1532,6 +1532,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)  	if (IS_CNA_CAPABLE(ha))  		eiter->a.sup_speed = __constant_cpu_to_be32(  		    FDMI_PORT_SPEED_10GB); +	else if (IS_QLA27XX(ha)) +		eiter->a.sup_speed = __constant_cpu_to_be32( +		    FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB| +		    FDMI_PORT_SPEED_8GB);  	else if (IS_QLA25XX(ha))  		eiter->a.sup_speed = __constant_cpu_to_be32(  		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| @@ -1580,6 +1584,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)  		eiter->a.cur_speed =  		    __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);  		break; +	case PORT_SPEED_32GB: +		eiter->a.cur_speed = +		    __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB); +		break;  	default:  		eiter->a.cur_speed =  		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); @@ -1889,6 +1897,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)  			case BIT_10:  				list[i].fp_speed = PORT_SPEED_16GB;  				break; +			case BIT_8: +				list[i].fp_speed = PORT_SPEED_32GB; +				break;  			}  			ql_dbg(ql_dbg_disc, vha, 0x205b, diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 03f715e7591..e2184412617 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -271,56 +271,46 @@ done:  }  static void -qla2x00_async_tm_cmd_done(void *data, void *ptr, int res) +qla2x00_tmf_iocb_timeout(void *data)  { -	srb_t *sp = (srb_t *)ptr; -	struct srb_iocb *iocb = &sp->u.iocb_cmd; -	struct scsi_qla_host *vha = (scsi_qla_host_t *)data; -	uint32_t flags; -	uint16_t lun; -	int rval; - -	if (!test_bit(UNLOADING, &vha->dpc_flags)) { -		flags = iocb->u.tmf.flags; -		lun = (uint16_t)iocb->u.tmf.lun; +	srb_t *sp = (srb_t *)data; +	struct srb_iocb *tmf = &sp->u.iocb_cmd; -		/* Issue Marker IOCB */ -		rval = qla2x00_marker(vha, vha->hw->req_q_map[0], -			vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, -			flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); +	tmf->u.tmf.comp_status = CS_TIMEOUT; +	complete(&tmf->u.tmf.comp); +} -		if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) { -			ql_dbg(ql_dbg_taskm, vha, 0x8030, -			    "TM IOCB failed (%x).\n", rval); -		} -	} -	sp->free(sp->fcport->vha, sp); +static void +qla2x00_tmf_sp_done(void *data, void *ptr, int res) +{ +	srb_t *sp = (srb_t *)ptr; +	struct srb_iocb *tmf = &sp->u.iocb_cmd; +	complete(&tmf->u.tmf.comp);  }  int -qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun, +qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,  	uint32_t tag)  {  	struct scsi_qla_host *vha = fcport->vha; +	struct srb_iocb *tm_iocb;  	srb_t *sp; -	struct srb_iocb *tcf; -	int rval; +	int rval = QLA_FUNCTION_FAILED; -	rval = QLA_FUNCTION_FAILED;  	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);  	if (!sp)  		goto done; +	tm_iocb = &sp->u.iocb_cmd;  	sp->type = SRB_TM_CMD;  	sp->name = "tmf"; -	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - -	tcf = &sp->u.iocb_cmd; -	tcf->u.tmf.flags = tm_flags; -	tcf->u.tmf.lun = lun; -	tcf->u.tmf.data = tag; -	tcf->timeout = qla2x00_async_iocb_timeout; -	sp->done = qla2x00_async_tm_cmd_done; +	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); +	tm_iocb->u.tmf.flags = flags; +	tm_iocb->u.tmf.lun = lun; +	tm_iocb->u.tmf.data = tag; +	sp->done = qla2x00_tmf_sp_done; +	tm_iocb->timeout = qla2x00_tmf_iocb_timeout; +	init_completion(&tm_iocb->u.tmf.comp);  	rval = qla2x00_start_sp(sp);  	if (rval != QLA_SUCCESS) @@ -330,14 +320,121 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,  	    "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",  	    sp->handle, fcport->loop_id, fcport->d_id.b.domain,  	    fcport->d_id.b.area, fcport->d_id.b.al_pa); + +	wait_for_completion(&tm_iocb->u.tmf.comp); + +	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? +	    QLA_SUCCESS : QLA_FUNCTION_FAILED; + +	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) { +		ql_dbg(ql_dbg_taskm, vha, 0x8030, +		    "TM IOCB failed (%x).\n", rval); +	} + +	if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { +		flags = tm_iocb->u.tmf.flags; +		lun = (uint16_t)tm_iocb->u.tmf.lun; + +		/* Issue Marker IOCB */ +		qla2x00_marker(vha, vha->hw->req_q_map[0], +		    vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, +		    flags == TCF_LUN_RESET ? 
MK_SYNC_ID_LUN : MK_SYNC_ID); +	} + +done_free_sp: +	sp->free(vha, sp); +done:  	return rval; +} + +static void +qla24xx_abort_iocb_timeout(void *data) +{ +	srb_t *sp = (srb_t *)data; +	struct srb_iocb *abt = &sp->u.iocb_cmd; + +	abt->u.abt.comp_status = CS_TIMEOUT; +	complete(&abt->u.abt.comp); +} + +static void +qla24xx_abort_sp_done(void *data, void *ptr, int res) +{ +	srb_t *sp = (srb_t *)ptr; +	struct srb_iocb *abt = &sp->u.iocb_cmd; + +	complete(&abt->u.abt.comp); +} + +static int +qla24xx_async_abort_cmd(srb_t *cmd_sp) +{ +	scsi_qla_host_t *vha = cmd_sp->fcport->vha; +	fc_port_t *fcport = cmd_sp->fcport; +	struct srb_iocb *abt_iocb; +	srb_t *sp; +	int rval = QLA_FUNCTION_FAILED; + +	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); +	if (!sp) +		goto done; + +	abt_iocb = &sp->u.iocb_cmd; +	sp->type = SRB_ABT_CMD; +	sp->name = "abort"; +	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); +	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; +	sp->done = qla24xx_abort_sp_done; +	abt_iocb->timeout = qla24xx_abort_iocb_timeout; +	init_completion(&abt_iocb->u.abt.comp); + +	rval = qla2x00_start_sp(sp); +	if (rval != QLA_SUCCESS) +		goto done_free_sp; + +	ql_dbg(ql_dbg_async, vha, 0x507c, +	    "Abort command issued - hdl=%x, target_id=%x\n", +	    cmd_sp->handle, fcport->tgt_id); + +	wait_for_completion(&abt_iocb->u.abt.comp); + +	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? +	    QLA_SUCCESS : QLA_FUNCTION_FAILED;  done_free_sp: -	sp->free(fcport->vha, sp); +	sp->free(vha, sp);  done:  	return rval;  } +int +qla24xx_async_abort_command(srb_t *sp) +{ +	unsigned long   flags = 0; + +	uint32_t	handle; +	fc_port_t	*fcport = sp->fcport; +	struct scsi_qla_host *vha = fcport->vha; +	struct qla_hw_data *ha = vha->hw; +	struct req_que *req = vha->req; + +	spin_lock_irqsave(&ha->hardware_lock, flags); +	for (handle = 1; handle < req->num_outstanding_cmds; handle++) { +		if (req->outstanding_cmds[handle] == sp) +			break; +	} +	spin_unlock_irqrestore(&ha->hardware_lock, flags); +	if (handle == req->num_outstanding_cmds) { +		/* Command not found. */ +		return QLA_FUNCTION_FAILED; +	} +	if (sp->type == SRB_FXIOCB_DCMD) +		return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, +		    FXDISC_ABORT_IOCTL); + +	return qla24xx_async_abort_cmd(sp); +} +  void  qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,      uint16_t *data) @@ -1379,7 +1476,13 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)  	}  	ha->fw_dumped = 0; -	fixed_size = mem_size = eft_size = fce_size = mq_size = 0; +	ha->fw_dump_cap_flags = 0; +	dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; +	req_q_size = rsp_q_size = 0; + +	if (IS_QLA27XX(ha)) +		goto try_fce; +  	if (IS_QLA2100(ha) || IS_QLA2200(ha)) {  		fixed_size = sizeof(struct qla2100_fw_dump);  	} else if (IS_QLA23XX(ha)) { @@ -1395,6 +1498,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)  			fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);  		else  			fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); +  		mem_size = (ha->fw_memory_size - 0x100000 + 1) *  		    sizeof(uint32_t);  		if (ha->mqenable) { @@ -1412,9 +1516,16 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)  		if (ha->tgt.atio_ring)  			mq_size += ha->tgt.atio_q_length * sizeof(request_t);  		/* Allocate memory for Fibre Channel Event Buffer. 
*/ -		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) +		if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && +		    !IS_QLA27XX(ha))  			goto try_eft; +try_fce: +		if (ha->fce) +			dma_free_coherent(&ha->pdev->dev, +			    FCE_SIZE, ha->fce, ha->fce_dma); + +		/* Allocate memory for Fibre Channel Event Buffer. */  		tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,  		    GFP_KERNEL);  		if (!tc) { @@ -1442,7 +1553,12 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)  		ha->flags.fce_enabled = 1;  		ha->fce_dma = tc_dma;  		ha->fce = tc; +  try_eft: +		if (ha->eft) +			dma_free_coherent(&ha->pdev->dev, +			    EFT_SIZE, ha->eft, ha->eft_dma); +  		/* Allocate memory for Extended Trace Buffer. */  		tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,  		    GFP_KERNEL); @@ -1469,15 +1585,28 @@ try_eft:  		ha->eft_dma = tc_dma;  		ha->eft = tc;  	} +  cont_alloc: +	if (IS_QLA27XX(ha)) { +		if (!ha->fw_dump_template) { +			ql_log(ql_log_warn, vha, 0x00ba, +			    "Failed missing fwdump template\n"); +			return; +		} +		dump_size = qla27xx_fwdt_calculate_dump_size(vha); +		ql_dbg(ql_dbg_init, vha, 0x00fa, +		    "-> allocating fwdump (%x bytes)...\n", dump_size); +		goto allocate; +	} +  	req_q_size = req->length * sizeof(request_t);  	rsp_q_size = rsp->length * sizeof(response_t); -  	dump_size = offsetof(struct qla2xxx_fw_dump, isp);  	dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;  	ha->chain_offset = dump_size;  	dump_size += mq_size + fce_size; +allocate:  	ha->fw_dump = vmalloc(dump_size);  	if (!ha->fw_dump) {  		ql_log(ql_log_warn, vha, 0x00c4, @@ -1499,10 +1628,13 @@ cont_alloc:  		}  		return;  	} +	ha->fw_dump_len = dump_size;  	ql_dbg(ql_dbg_init, vha, 0x00c5,  	    "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); -	ha->fw_dump_len = dump_size; +	if (IS_QLA27XX(ha)) +		return; +  	ha->fw_dump->signature[0] = 'Q';  	ha->fw_dump->signature[1] = 'L';  	ha->fw_dump->signature[2] = 'G'; @@ -1694,6 +1826,8 @@ enable_82xx_npiv:  				if (!fw_major_version && ql2xallocfwdump  				    && !(IS_P3P_TYPE(ha)))  					qla2x00_alloc_fw_dump(vha); +			} else { +				goto failed;  			}  		} else {  			ql_log(ql_log_fatal, vha, 0x00cd, @@ -1716,9 +1850,6 @@ enable_82xx_npiv:  		spin_unlock_irqrestore(&ha->hardware_lock, flags);  	} -	if (IS_QLA83XX(ha)) -		goto skip_fac_check; -  	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {  		uint32_t size; @@ -1731,8 +1862,8 @@ enable_82xx_npiv:  			    "Unsupported FAC firmware (%d.%02d.%02d).\n",  			    ha->fw_major_version, ha->fw_minor_version,  			    ha->fw_subminor_version); -skip_fac_check: -			if (IS_QLA83XX(ha)) { + +			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  				ha->flags.fac_supported = 0;  				rval = QLA_SUCCESS;  			} @@ -1931,7 +2062,11 @@ qla24xx_config_rings(struct scsi_qla_host *vha)  	icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));  	icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); -	if (ha->mqenable || IS_QLA83XX(ha)) { +	if (IS_SHADOW_REG_CAPABLE(ha)) +		icb->firmware_options_2 |= +		    __constant_cpu_to_le32(BIT_30|BIT_29); + +	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  		icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);  		icb->rid = __constant_cpu_to_le16(rid);  		if (ha->flags.msix_enabled) { @@ -2008,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)  		req = ha->req_q_map[que];  		if (!req)  			continue; +		req->out_ptr = (void *)(req->ring + req->length); +		*req->out_ptr = 0;  		for (cnt = 1; cnt < 
req->num_outstanding_cmds; cnt++)  			req->outstanding_cmds[cnt] = NULL; @@ -2023,6 +2160,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)  		rsp = ha->rsp_q_map[que];  		if (!rsp)  			continue; +		rsp->in_ptr = (void *)(rsp->ring + rsp->length); +		*rsp->in_ptr = 0;  		/* Initialize response queue entries */  		if (IS_QLAFX00(ha))  			qlafx00_init_response_q_entries(rsp); @@ -3276,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)  					    fcport->d_id.b.domain,  					    fcport->d_id.b.area,  					    fcport->d_id.b.al_pa); -					fcport->loop_id = FC_NO_LOOP_ID; +					qla2x00_clear_loop_id(fcport);  				}  			}  		} @@ -4597,7 +4736,6 @@ static int  qla2x00_restart_isp(scsi_qla_host_t *vha)  {  	int status = 0; -	uint32_t wait_time;  	struct qla_hw_data *ha = vha->hw;  	struct req_que *req = ha->req_q_map[0];  	struct rsp_que *rsp = ha->rsp_q_map[0]; @@ -4614,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)  	if (!status && !(status = qla2x00_init_rings(vha))) {  		clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);  		ha->flags.chip_reset_done = 1; +  		/* Initialize the queues in use */  		qla25xx_init_queues(ha);  		status = qla2x00_fw_ready(vha);  		if (!status) { -			ql_dbg(ql_dbg_taskm, vha, 0x8031, -			    "Start configure loop status = %d.\n", status); -  			/* Issue a marker after FW becomes ready. */  			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); @@ -4636,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)  				qlt_24xx_process_atio_queue(vha);  			spin_unlock_irqrestore(&ha->hardware_lock, flags); -			/* Wait at most MAX_TARGET RSCNs for a stable link. */ -			wait_time = 256; -			do { -				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); -				qla2x00_configure_loop(vha); -				wait_time--; -			} while (!atomic_read(&vha->loop_down_timer) && -				!(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) -				&& wait_time && (test_bit(LOOP_RESYNC_NEEDED, -				&vha->dpc_flags))); +			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);  		}  		/* if no cable then assume it's good */  		if ((vha->device_flags & DFLG_NO_CABLE))  			status = 0; - -		ql_dbg(ql_dbg_taskm, vha, 0x8032, -		    "Configure loop done, status = 0x%x.\n", status);  	}  	return (status);  } @@ -4790,13 +4914,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)  	nv = ha->nvram;  	/* Determine NVRAM starting address. 
*/ -	if (ha->flags.port0) { +	if (ha->port_no == 0) {  		ha->nvram_base = FA_NVRAM_FUNC0_ADDR;  		ha->vpd_base = FA_NVRAM_VPD0_ADDR;  	} else {  		ha->nvram_base = FA_NVRAM_FUNC1_ADDR;  		ha->vpd_base = FA_NVRAM_VPD1_ADDR;  	} +  	ha->nvram_size = sizeof(struct nvram_24xx);  	ha->vpd_size = FA_NVRAM_VPD_SIZE; @@ -4840,7 +4965,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)  		nv->exchange_count = __constant_cpu_to_le16(0);  		nv->hard_address = __constant_cpu_to_le16(124);  		nv->port_name[0] = 0x21; -		nv->port_name[1] = 0x00 + ha->port_no; +		nv->port_name[1] = 0x00 + ha->port_no + 1;  		nv->port_name[2] = 0x00;  		nv->port_name[3] = 0xe0;  		nv->port_name[4] = 0x8b; @@ -5115,6 +5240,99 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,  		segments--;  	} +	if (!IS_QLA27XX(ha)) +		return rval; + +	if (ha->fw_dump_template) +		vfree(ha->fw_dump_template); +	ha->fw_dump_template = NULL; +	ha->fw_dump_template_len = 0; + +	ql_dbg(ql_dbg_init, vha, 0x0161, +	    "Loading fwdump template from %x\n", faddr); +	qla24xx_read_flash_data(vha, dcode, faddr, 7); +	risc_size = be32_to_cpu(dcode[2]); +	ql_dbg(ql_dbg_init, vha, 0x0162, +	    "-> array size %x dwords\n", risc_size); +	if (risc_size == 0 || risc_size == ~0) +		goto default_template; + +	dlen = (risc_size - 8) * sizeof(*dcode); +	ql_dbg(ql_dbg_init, vha, 0x0163, +	    "-> template allocating %x bytes...\n", dlen); +	ha->fw_dump_template = vmalloc(dlen); +	if (!ha->fw_dump_template) { +		ql_log(ql_log_warn, vha, 0x0164, +		    "Failed fwdump template allocate %x bytes.\n", risc_size); +		goto default_template; +	} + +	faddr += 7; +	risc_size -= 8; +	dcode = ha->fw_dump_template; +	qla24xx_read_flash_data(vha, dcode, faddr, risc_size); +	for (i = 0; i < risc_size; i++) +		dcode[i] = le32_to_cpu(dcode[i]); + +	if (!qla27xx_fwdt_template_valid(dcode)) { +		ql_log(ql_log_warn, vha, 0x0165, +		    "Failed fwdump template validate\n"); +		goto default_template; +	} + +	dlen = qla27xx_fwdt_template_size(dcode); +	ql_dbg(ql_dbg_init, vha, 0x0166, +	    "-> template size %x bytes\n", dlen); +	if (dlen > risc_size * sizeof(*dcode)) { +		ql_log(ql_log_warn, vha, 0x0167, +		    "Failed fwdump template exceeds array by %x bytes\n", +		    (uint32_t)(dlen - risc_size * sizeof(*dcode))); +		goto default_template; +	} +	ha->fw_dump_template_len = dlen; +	return rval; + +default_template: +	ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n"); +	if (ha->fw_dump_template) +		vfree(ha->fw_dump_template); +	ha->fw_dump_template = NULL; +	ha->fw_dump_template_len = 0; + +	dlen = qla27xx_fwdt_template_default_size(); +	ql_dbg(ql_dbg_init, vha, 0x0169, +	    "-> template allocating %x bytes...\n", dlen); +	ha->fw_dump_template = vmalloc(dlen); +	if (!ha->fw_dump_template) { +		ql_log(ql_log_warn, vha, 0x016a, +		    "Failed fwdump template allocate %x bytes.\n", risc_size); +		goto failed_template; +	} + +	dcode = ha->fw_dump_template; +	risc_size = dlen / sizeof(*dcode); +	memcpy(dcode, qla27xx_fwdt_template_default(), dlen); +	for (i = 0; i < risc_size; i++) +		dcode[i] = be32_to_cpu(dcode[i]); + +	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { +		ql_log(ql_log_warn, vha, 0x016b, +		    "Failed fwdump template validate\n"); +		goto failed_template; +	} + +	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); +	ql_dbg(ql_dbg_init, vha, 0x016c, +	    "-> template size %x bytes\n", dlen); +	ha->fw_dump_template_len = dlen; +	return rval; + +failed_template: +	ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump 
template\n"); +	if (ha->fw_dump_template) +		vfree(ha->fw_dump_template); +	ha->fw_dump_template = NULL; +	ha->fw_dump_template_len = 0;  	return rval;  } @@ -5229,7 +5447,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)  	uint32_t risc_size;  	uint32_t i;  	struct fw_blob *blob; -	uint32_t *fwcode, fwclen; +	const uint32_t *fwcode; +	uint32_t fwclen;  	struct qla_hw_data *ha = vha->hw;  	struct req_que *req = ha->req_q_map[0]; @@ -5261,7 +5480,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)  		ql_log(ql_log_fatal, vha, 0x0093,  		    "Unable to verify integrity of firmware image (%Zd).\n",  		    blob->fw->size); -		goto fail_fw_integrity; +		return QLA_FUNCTION_FAILED;  	}  	for (i = 0; i < 4; i++)  		dcode[i] = be32_to_cpu(fwcode[i + 4]); @@ -5275,7 +5494,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)  		ql_log(ql_log_fatal, vha, 0x0095,  		    "Firmware data: %08x %08x %08x %08x.\n",  		    dcode[0], dcode[1], dcode[2], dcode[3]); -		goto fail_fw_integrity; +		return QLA_FUNCTION_FAILED;  	}  	while (segments && rval == QLA_SUCCESS) { @@ -5289,8 +5508,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)  			ql_log(ql_log_fatal, vha, 0x0096,  			    "Unable to verify integrity of firmware image "  			    "(%Zd).\n", blob->fw->size); - -			goto fail_fw_integrity; +			return QLA_FUNCTION_FAILED;  		}  		fragment = 0; @@ -5324,10 +5542,100 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)  		/* Next segment. */  		segments--;  	} + +	if (!IS_QLA27XX(ha)) +		return rval; + +	if (ha->fw_dump_template) +		vfree(ha->fw_dump_template); +	ha->fw_dump_template = NULL; +	ha->fw_dump_template_len = 0; + +	ql_dbg(ql_dbg_init, vha, 0x171, +	    "Loading fwdump template from %x\n", +	    (uint32_t)((void *)fwcode - (void *)blob->fw->data)); +	risc_size = be32_to_cpu(fwcode[2]); +	ql_dbg(ql_dbg_init, vha, 0x172, +	    "-> array size %x dwords\n", risc_size); +	if (risc_size == 0 || risc_size == ~0) +		goto default_template; + +	dlen = (risc_size - 8) * sizeof(*fwcode); +	ql_dbg(ql_dbg_init, vha, 0x0173, +	    "-> template allocating %x bytes...\n", dlen); +	ha->fw_dump_template = vmalloc(dlen); +	if (!ha->fw_dump_template) { +		ql_log(ql_log_warn, vha, 0x0174, +		    "Failed fwdump template allocate %x bytes.\n", risc_size); +		goto default_template; +	} + +	fwcode += 7; +	risc_size -= 8; +	dcode = ha->fw_dump_template; +	for (i = 0; i < risc_size; i++) +		dcode[i] = le32_to_cpu(fwcode[i]); + +	if (!qla27xx_fwdt_template_valid(dcode)) { +		ql_log(ql_log_warn, vha, 0x0175, +		    "Failed fwdump template validate\n"); +		goto default_template; +	} + +	dlen = qla27xx_fwdt_template_size(dcode); +	ql_dbg(ql_dbg_init, vha, 0x0176, +	    "-> template size %x bytes\n", dlen); +	if (dlen > risc_size * sizeof(*fwcode)) { +		ql_log(ql_log_warn, vha, 0x0177, +		    "Failed fwdump template exceeds array by %x bytes\n", +		    (uint32_t)(dlen - risc_size * sizeof(*fwcode))); +		goto default_template; +	} +	ha->fw_dump_template_len = dlen;  	return rval; -fail_fw_integrity: -	return QLA_FUNCTION_FAILED; +default_template: +	ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n"); +	if (ha->fw_dump_template) +		vfree(ha->fw_dump_template); +	ha->fw_dump_template = NULL; +	ha->fw_dump_template_len = 0; + +	dlen = qla27xx_fwdt_template_default_size(); +	ql_dbg(ql_dbg_init, vha, 0x0179, +	    "-> template allocating %x bytes...\n", dlen); +	ha->fw_dump_template = vmalloc(dlen); +	if 
(!ha->fw_dump_template) { +		ql_log(ql_log_warn, vha, 0x017a, +		    "Failed fwdump template allocate %x bytes.\n", risc_size); +		goto failed_template; +	} + +	dcode = ha->fw_dump_template; +	risc_size = dlen / sizeof(*fwcode); +	fwcode = qla27xx_fwdt_template_default(); +	for (i = 0; i < risc_size; i++) +		dcode[i] = be32_to_cpu(fwcode[i]); + +	if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { +		ql_log(ql_log_warn, vha, 0x017b, +		    "Failed fwdump template validate\n"); +		goto failed_template; +	} + +	dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); +	ql_dbg(ql_dbg_init, vha, 0x017c, +	    "-> template size %x bytes\n", dlen); +	ha->fw_dump_template_len = dlen; +	return rval; + +failed_template: +	ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n"); +	if (ha->fw_dump_template) +		vfree(ha->fw_dump_template); +	ha->fw_dump_template = NULL; +	ha->fw_dump_template_len = 0; +	return rval;  }  int @@ -5603,7 +5911,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)  		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);  		nv->exchange_count = __constant_cpu_to_le16(0);  		nv->port_name[0] = 0x21; -		nv->port_name[1] = 0x00 + ha->port_no; +		nv->port_name[1] = 0x00 + ha->port_no + 1;  		nv->port_name[2] = 0x00;  		nv->port_name[3] = 0xe0;  		nv->port_name[4] = 0x8b; @@ -5637,7 +5945,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)  		nv->enode_mac[2] = 0xDD;  		nv->enode_mac[3] = 0x04;  		nv->enode_mac[4] = 0x05; -		nv->enode_mac[5] = 0x06 + ha->port_no; +		nv->enode_mac[5] = 0x06 + ha->port_no + 1;  		rval = 1;  	} @@ -5675,7 +5983,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)  		icb->enode_mac[2] = 0xDD;  		icb->enode_mac[3] = 0x04;  		icb->enode_mac[4] = 0x05; -		icb->enode_mac[5] = 0x06 + ha->port_no; +		icb->enode_mac[5] = 0x06 + ha->port_no + 1;  	}  	/* Use extended-initialization control block. */ @@ -5778,7 +6086,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)  		ha->login_retry_count = ql2xloginretrycount;  	/* if not running MSI-X we need handshaking on interrupts */ -	if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha)) +	if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))  		icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);  	/* Enable ZIO. */ @@ -5816,7 +6124,6 @@ int  qla82xx_restart_isp(scsi_qla_host_t *vha)  {  	int status, rval; -	uint32_t wait_time;  	struct qla_hw_data *ha = vha->hw;  	struct req_que *req = ha->req_q_map[0];  	struct rsp_que *rsp = ha->rsp_q_map[0]; @@ -5830,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)  		status = qla2x00_fw_ready(vha);  		if (!status) { -			ql_log(ql_log_info, vha, 0x803c, -			    "Start configure loop, status =%d.\n", status); -  			/* Issue a marker after FW becomes ready. */  			qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); -  			vha->flags.online = 1; -			/* Wait at most MAX_TARGET RSCNs for a stable link. 
*/ -			wait_time = 256; -			do { -				clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); -				qla2x00_configure_loop(vha); -				wait_time--; -			} while (!atomic_read(&vha->loop_down_timer) && -			    !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) && -			    wait_time && -			    (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))); +			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);  		}  		/* if no cable then assume it's good */  		if ((vha->device_flags & DFLG_NO_CABLE))  			status = 0; - -		ql_log(ql_log_info, vha, 0x8000, -		    "Configure loop done, status = 0x%x.\n", status);  	}  	if (!status) { @@ -5868,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)  			vha->marker_needed = 1;  		} -		vha->flags.online = 1; -  		ha->isp_ops->enable_intrs(ha);  		ha->isp_abort_cnt = 0; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 957088b0461..b3b1d6fc2d6 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -1,10 +1,11 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ +#include "qla_target.h"  /**   * qla24xx_calc_iocbs() - Determine number of Command Type 3 and   * Continuation Type 1 IOCBs to allocate. @@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {  }  static inline void -qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) +qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp, +	struct qla_tgt_cmd *tc)  {  	struct dsd_dma *dsd_ptr, *tdsd_ptr;  	struct crc_context *ctx; -	ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); +	if (sp) +		ctx = (struct crc_context *)GET_CMD_CTX_SP(sp); +	else if (tc) +		ctx = (struct crc_context *)tc->ctx; +	else { +		BUG(); +		return; +	}  	/* clean up allocated prev pool */  	list_for_each_entry_safe(dsd_ptr, tdsd_ptr, @@ -261,25 +270,6 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)  }  static inline void -qla2x00_do_host_ramp_up(scsi_qla_host_t *vha) -{ -	if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth) -		return; - -	/* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */ -	if (time_before(jiffies, (vha->hw->host_last_rampdown_time + -	    HOST_QUEUE_RAMPDOWN_INTERVAL))) -		return; - -	/* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */ -	if (time_before(jiffies, (vha->hw->host_last_rampup_time + -	    HOST_QUEUE_RAMPUP_INTERVAL))) -		return; - -	set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags); -} - -static inline void  qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)  {  	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 46b9307e8be..76093152959 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -488,7 +488,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)  			req->ring_ptr++;  		/* Set chip new ring index. 
*/ -		if (ha->mqenable || IS_QLA83XX(ha)) { +		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  			WRT_REG_DWORD(req->req_q_in, req->ring_index);  			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);  		} else if (IS_QLAFX00(ha)) { @@ -524,7 +524,6 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,  {  	mrk_entry_t *mrk;  	struct mrk_entry_24xx *mrk24 = NULL; -	struct mrk_entry_fx00 *mrkfx = NULL;  	struct qla_hw_data *ha = vha->hw;  	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); @@ -541,15 +540,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,  	mrk->entry_type = MARKER_TYPE;  	mrk->modifier = type;  	if (type != MK_SYNC_ALL) { -		if (IS_QLAFX00(ha)) { -			mrkfx = (struct mrk_entry_fx00 *) mrk; -			mrkfx->handle = MAKE_HANDLE(req->id, mrkfx->handle); -			mrkfx->handle_hi = 0; -			mrkfx->tgt_id = cpu_to_le16(loop_id); -			mrkfx->lun[1] = LSB(lun); -			mrkfx->lun[2] = MSB(lun); -			host_to_fcp_swap(mrkfx->lun, sizeof(mrkfx->lun)); -		} else if (IS_FWI2_CAPABLE(ha)) { +		if (IS_FWI2_CAPABLE(ha)) {  			mrk24 = (struct mrk_entry_24xx *) mrk;  			mrk24->nport_handle = cpu_to_le16(loop_id);  			mrk24->lun[1] = LSB(lun); @@ -945,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,  	return 1;  } -static int +int  qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, -	uint32_t *dsd, uint16_t tot_dsds) +	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)  {  	void *next_dsd;  	uint8_t avail_dsds = 0; @@ -957,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,  	uint32_t *cur_dsd = dsd;  	uint16_t	used_dsds = tot_dsds; -	uint32_t	prot_int; +	uint32_t	prot_int; /* protection interval */  	uint32_t	partial;  	struct qla2_sgx sgx;  	dma_addr_t	sle_dma;  	uint32_t	sle_dma_len, tot_prot_dma_len = 0; -	struct scsi_cmnd *cmd = GET_CMD_SP(sp); - -	prot_int = cmd->device->sector_size; +	struct scsi_cmnd *cmd; +	struct scsi_qla_host *vha;  	memset(&sgx, 0, sizeof(struct qla2_sgx)); -	sgx.tot_bytes = scsi_bufflen(cmd); -	sgx.cur_sg = scsi_sglist(cmd); -	sgx.sp = sp; - -	sg_prot = scsi_prot_sglist(cmd); +	if (sp) { +		vha = sp->fcport->vha; +		cmd = GET_CMD_SP(sp); +		prot_int = cmd->device->sector_size; + +		sgx.tot_bytes = scsi_bufflen(cmd); +		sgx.cur_sg = scsi_sglist(cmd); +		sgx.sp = sp; + +		sg_prot = scsi_prot_sglist(cmd); +	} else if (tc) { +		vha = tc->vha; +		prot_int      = tc->blk_sz; +		sgx.tot_bytes = tc->bufflen; +		sgx.cur_sg    = tc->sg; +		sg_prot	      = tc->prot_sg; +	} else { +		BUG(); +		return 1; +	}  	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { @@ -1004,10 +1009,18 @@ alloc_and_fill:  				return 1;  			} -			list_add_tail(&dsd_ptr->list, -			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); +			if (sp) { +				list_add_tail(&dsd_ptr->list, +				    &((struct crc_context *) +					    sp->u.scmd.ctx)->dsd_list); + +				sp->flags |= SRB_CRC_CTX_DSD_VALID; +			} else { +				list_add_tail(&dsd_ptr->list, +				    &(tc->ctx->dsd_list)); +				tc->ctx_dsd_alloced = 1; +			} -			sp->flags |= SRB_CRC_CTX_DSD_VALID;  			/* add new list to cmd iocb or last list */  			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1042,21 +1055,35 @@ alloc_and_fill:  	return 0;  } -static int +int  qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, -	uint16_t tot_dsds) +	uint16_t tot_dsds, struct qla_tgt_cmd *tc)  {  	void *next_dsd;  	uint8_t avail_dsds = 0;  	uint32_t dsd_list_len;  	struct dsd_dma *dsd_ptr; -	struct 
scatterlist *sg; +	struct scatterlist *sg, *sgl;  	uint32_t *cur_dsd = dsd;  	int	i;  	uint16_t	used_dsds = tot_dsds; -	struct scsi_cmnd *cmd = GET_CMD_SP(sp); +	struct scsi_cmnd *cmd; +	struct scsi_qla_host *vha; + +	if (sp) { +		cmd = GET_CMD_SP(sp); +		sgl = scsi_sglist(cmd); +		vha = sp->fcport->vha; +	} else if (tc) { +		sgl = tc->sg; +		vha = tc->vha; +	} else { +		BUG(); +		return 1; +	} -	scsi_for_each_sg(cmd, sg, tot_dsds, i) { + +	for_each_sg(sgl, sg, tot_dsds, i) {  		dma_addr_t	sle_dma;  		/* Allocate additional continuation packets? */ @@ -1085,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,  				return 1;  			} -			list_add_tail(&dsd_ptr->list, -			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); +			if (sp) { +				list_add_tail(&dsd_ptr->list, +				    &((struct crc_context *) +					    sp->u.scmd.ctx)->dsd_list); -			sp->flags |= SRB_CRC_CTX_DSD_VALID; +				sp->flags |= SRB_CRC_CTX_DSD_VALID; +			} else { +				list_add_tail(&dsd_ptr->list, +				    &(tc->ctx->dsd_list)); +				tc->ctx_dsd_alloced = 1; +			}  			/* add new list to cmd iocb or last list */  			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1111,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,  	return 0;  } -static int +int  qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, -							uint32_t *dsd, -	uint16_t tot_dsds) +	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)  {  	void *next_dsd;  	uint8_t avail_dsds = 0;  	uint32_t dsd_list_len;  	struct dsd_dma *dsd_ptr; -	struct scatterlist *sg; +	struct scatterlist *sg, *sgl;  	int	i;  	struct scsi_cmnd *cmd;  	uint32_t *cur_dsd = dsd; -	uint16_t	used_dsds = tot_dsds; +	uint16_t used_dsds = tot_dsds; +	struct scsi_qla_host *vha; + +	if (sp) { +		cmd = GET_CMD_SP(sp); +		sgl = scsi_prot_sglist(cmd); +		vha = sp->fcport->vha; +	} else if (tc) { +		vha = tc->vha; +		sgl = tc->prot_sg; +	} else { +		BUG(); +		return 1; +	} -	cmd = GET_CMD_SP(sp); -	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { +	ql_dbg(ql_dbg_tgt, vha, 0xe021, +		"%s: enter\n", __func__); + +	for_each_sg(sgl, sg, tot_dsds, i) {  		dma_addr_t	sle_dma;  		/* Allocate additional continuation packets? 
*/ @@ -1156,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,  				return 1;  			} -			list_add_tail(&dsd_ptr->list, -			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list); +			if (sp) { +				list_add_tail(&dsd_ptr->list, +				    &((struct crc_context *) +					    sp->u.scmd.ctx)->dsd_list); -			sp->flags |= SRB_CRC_CTX_DSD_VALID; +				sp->flags |= SRB_CRC_CTX_DSD_VALID; +			} else { +				list_add_tail(&dsd_ptr->list, +				    &(tc->ctx->dsd_list)); +				tc->ctx_dsd_alloced = 1; +			}  			/* add new list to cmd iocb or last list */  			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); @@ -1395,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  	if (!bundling && tot_prot_dsds) {  		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, -		    cur_dsd, tot_dsds)) +			cur_dsd, tot_dsds, NULL))  			goto crc_queuing_error;  	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd, -	    (tot_dsds - tot_prot_dsds))) +			(tot_dsds - tot_prot_dsds), NULL))  		goto crc_queuing_error;  	if (bundling && tot_prot_dsds) { @@ -1407,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,  			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);  		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;  		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, -		    tot_prot_dsds)) +				tot_prot_dsds, NULL))  			goto crc_queuing_error;  	}  	return QLA_SUCCESS; @@ -1487,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp)  	tot_dsds = nseg;  	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);  	if (req->cnt < (req_cnt + 2)) { -		cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - +		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : +		    RD_REG_DWORD_RELAXED(req->req_q_out);  		if (req->ring_index < cnt)  			req->cnt = cnt - req->ring_index;  		else @@ -1706,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp)  	tot_prot_dsds = nseg;  	tot_dsds += nseg;  	if (req->cnt < (req_cnt + 2)) { -		cnt = RD_REG_DWORD_RELAXED(req->req_q_out); - +		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : +		    RD_REG_DWORD_RELAXED(req->req_q_out);  		if (req->ring_index < cnt)  			req->cnt = cnt - req->ring_index;  		else @@ -1823,7 +1878,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)  	/* Check for room in outstanding command list. */  	handle = req->current_outstanding_cmd; -	for (index = 1; req->num_outstanding_cmds; index++) { +	for (index = 1; index < req->num_outstanding_cmds; index++) {  		handle++;  		if (handle == req->num_outstanding_cmds)  			handle = 1; @@ -1848,7 +1903,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)  skip_cmd_array:  	/* Check for room on request queue. 
*/
 	if (req->cnt < req_cnt) {
-		if (ha->mqenable || IS_QLA83XX(ha))
+		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
 		else if (IS_P3P_TYPE(ha))
 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
@@ -2594,6 +2649,29 @@ queuing_error:
 	return QLA_FUNCTION_FAILED;
 }
 
+void
+qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
+{
+	struct srb_iocb *aio = &sp->u.iocb_cmd;
+	scsi_qla_host_t *vha = sp->fcport->vha;
+	struct req_que *req = vha->req;
+
+	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
+	abt_iocb->entry_type = ABORT_IOCB_TYPE;
+	abt_iocb->entry_count = 1;
+	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
+	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	abt_iocb->handle_to_abort =
+	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+	abt_iocb->vp_index = vha->vp_idx;
+	abt_iocb->req_que_no = cpu_to_le16(req->id);
+	/* Send the command to the firmware */
+	wmb();
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -2647,7 +2725,9 @@ qla2x00_start_sp(srb_t *sp)
 		qlafx00_fxdisc_iocb(sp, pkt);
 		break;
 	case SRB_ABT_CMD:
-		qlafx00_abort_iocb(sp, pkt);
+		IS_QLAFX00(ha) ?
+			qlafx00_abort_iocb(sp, pkt) :
+			qla24xx_abort_iocb(sp, pkt);
 		break;
 	default:
 		break;
 	}
@@ -2809,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
 	/* Check for room on request queue. */
 	if (req->cnt < req_cnt + 2) {
-		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
 		if  (req->ring_index < cnt)
 			req->cnt = cnt - req->ring_index;
 		else
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index df1b30ba938..a56825c73c3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -56,6 +56,16 @@ qla2100_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		hccr = RD_REG_WORD(&reg->hccr);
+		/* Check for PCI disconnection */
+		if (hccr == 0xffff) {
+			/*
+			 * Schedule this on the default system workqueue so that
+			 * all the adapter workqueues and the DPC thread can be
+			 * shut down cleanly.
+			 */
+			schedule_work(&ha->board_disable);
+			break;
+		}
 		if (hccr & HCCR_RISC_PAUSE) {
 			if (pci_channel_offline(ha->pdev))
 				break;
@@ -110,6 +120,22 @@
 	return (IRQ_HANDLED);
 }
 
+bool
+qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+{
+	/* Check for PCI disconnection */
+	if (reg == 0xffffffff) {
+		/*
+		 * Schedule this on the default system workqueue so that all the
+		 * adapter workqueues and the DPC thread can be shut down
+		 * cleanly.
+		 */
+		schedule_work(&vha->hw->board_disable);
+		return true;
+	} else
+		return false;
+}
+
 /**
  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
@@ -148,11 +174,14 @@ qla2300_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+		if (qla2x00_check_reg_for_disconnect(vha, stat))
+			break;
 		if (stat & HSR_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
 
 			hccr = RD_REG_WORD(&reg->hccr);
+
 			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
 				ql_log(ql_log_warn, vha, 0x5026,
 				    "Parity error -- HCCR=%x, Dumping "
@@ -269,11 +298,18 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 		{ "Complete", "Request Notification", "Time Extension" };
 	int rval;
 	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
+	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
 	uint16_t __iomem *wptr;
 	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
 
 	/* Seed data -- mailbox1 -> mailbox7. */
-	wptr = (uint16_t __iomem *)&reg24->mailbox1;
+	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
+		wptr = (uint16_t __iomem *)&reg24->mailbox1;
+	else if (IS_QLA8044(vha->hw))
+		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
+	else
+		return;
+
 	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
 		mb[cnt] = RD_REG_WORD(wptr);
@@ -287,7 +323,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 	case MBA_IDC_COMPLETE:
 		if (mb[1] >> 15) {
 			vha->hw->flags.idc_compl_status = 1;
-			if (vha->hw->notify_dcbx_comp)
+			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
 				complete(&vha->hw->dcbx_comp);
 		}
 		break;
@@ -320,15 +356,16 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
 const char *
 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
 {
-	static const char * const link_speeds[] = {
-		"1", "2", "?", "4", "8", "16", "10"
+	static const char *const link_speeds[] = {
+		"1", "2", "?", "4", "8", "16", "32", "10"
 	};
+#define	QLA_LAST_SPEED	7
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
 		return link_speeds[0];
 	else if (speed == 0x13)
-		return link_speeds[6];
-	else if (speed < 6)
+		return link_speeds[QLA_LAST_SPEED];
+	else if (speed < QLA_LAST_SPEED)
 		return link_speeds[speed];
 	else
 		return link_speeds[LS_UNKNOWN];
@@ -613,7 +650,7 @@ skip_rio:
 		break;
 
 	case MBA_SYSTEM_ERR:		/* System Error */
-		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
+		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
 			
RD_REG_WORD(&reg24->mailbox7) : 0;
 		ql_log(ql_log_warn, vha, 0x5003,
 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
@@ -630,7 +667,7 @@ skip_rio:
 				vha->device_flags |= DFLG_DEV_FAILED;
 			} else {
 				/* Check to see if MPI timeout occurred */
-				if ((mbx & MBX_3) && (ha->flags.port0))
+				if ((mbx & MBX_3) && (ha->port_no == 0))
 					set_bit(MPI_RESET_NEEDED,
 					    &vha->dpc_flags);
@@ -758,7 +795,7 @@ skip_rio:
 			ql_dbg(ql_dbg_async, vha, 0x500d,
 			    "DCBX Completed -- %04x %04x %04x.\n",
 			    mb[1], mb[2], mb[3]);
-			if (ha->notify_dcbx_comp)
+			if (ha->notify_dcbx_comp && !vha->vp_idx)
 				complete(&ha->dcbx_comp);
 
 		} else
@@ -1032,7 +1069,7 @@ skip_rio:
 			}
 		}
 	case MBA_IDC_COMPLETE:
-		if (ha->notify_lb_portup_comp)
+		if (ha->notify_lb_portup_comp && !vha->vp_idx)
 			complete(&ha->lb_portup_comp);
 		/* Fallthru */
 	case MBA_IDC_TIME_EXT:
@@ -1461,8 +1498,7 @@ logio_done:
 }
 
 static void
-qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
-    struct tsk_mgmt_entry *tsk)
+qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
 {
 	const char func[] = "TMF-IOCB";
 	const char *type;
@@ -1470,7 +1506,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	srb_t *sp;
 	struct srb_iocb *iocb;
 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
-	int error = 1;
 
 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
 	if (!sp)
@@ -1479,37 +1514,35 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	iocb = &sp->u.iocb_cmd;
 	type = sp->name;
 	fcport = sp->fcport;
+	iocb->u.tmf.data = QLA_SUCCESS;
 
 	if (sts->entry_status) {
 		ql_log(ql_log_warn, fcport->vha, 0x5038,
 		    "Async-%s error - hdl=%x entry-status(%x).\n",
 		    type, sp->handle, sts->entry_status);
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
 	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
 		ql_log(ql_log_warn, fcport->vha, 0x5039,
 		    "Async-%s error - hdl=%x completion status(%x).\n",
 		    type, sp->handle, sts->comp_status);
-	} else if (!(le16_to_cpu(sts->scsi_status) &
+		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+	} else if ((le16_to_cpu(sts->scsi_status) &
 	    SS_RESPONSE_INFO_LEN_VALID)) {
-		ql_log(ql_log_warn, fcport->vha, 0x503a,
-		    "Async-%s error - hdl=%x no response info(%x).\n",
-		    type, sp->handle, sts->scsi_status);
-	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
-		ql_log(ql_log_warn, fcport->vha, 0x503b,
-		    "Async-%s error - hdl=%x not enough response(%d).\n",
-		    type, sp->handle, sts->rsp_data_len);
-	} else if (sts->data[3]) {
-		ql_log(ql_log_warn, fcport->vha, 0x503c,
-		    "Async-%s error - hdl=%x response(%x).\n",
-		    type, sp->handle, sts->data[3]);
-	} else {
-		error = 0;
+		if (le32_to_cpu(sts->rsp_data_len) < 4) {
+			ql_log(ql_log_warn, fcport->vha, 0x503b,
+			    "Async-%s error - hdl=%x not enough response(%d).\n",
+			    type, sp->handle, sts->rsp_data_len);
+		} else if (sts->data[3]) {
+			ql_log(ql_log_warn, fcport->vha, 0x503c,
+			    "Async-%s error - hdl=%x response(%x).\n",
+			    type, sp->handle, sts->data[3]);
+			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
+		}
 	}
 
-	if (error) {
-		iocb->u.tmf.data = error;
+	if (iocb->u.tmf.data != QLA_SUCCESS)
 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
 		    (uint8_t *)sts, sizeof(*sts));
-	}
 
 	sp->done(vha, sp, 0);
 }
@@ -1957,6 +1990,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	que = MSW(sts->handle);
 	req = 
ha->req_q_map[que]; +	/* Check for invalid queue pointer */ +	if (req == NULL || +	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { +		ql_dbg(ql_dbg_io, vha, 0x3059, +		    "Invalid status handle (0x%x): Bad req pointer. req=%p, " +		    "que=%u.\n", sts->handle, req, que); +		return; +	} +  	/* Validate handle. */  	if (handle < req->num_outstanding_cmds)  		sp = req->outstanding_cmds[handle]; @@ -1967,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)  		ql_dbg(ql_dbg_io, vha, 0x3017,  		    "Invalid status handle (0x%x).\n", sts->handle); -		if (IS_P3P_TYPE(ha)) -			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); -		else -			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); -		qla2xxx_wake_dpc(vha); +		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { +			if (IS_P3P_TYPE(ha)) +				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); +			else +				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); +			qla2xxx_wake_dpc(vha); +		}  		return;  	} @@ -1980,9 +2024,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)  		return;  	} +	/* Task Management completion. */ +	if (sp->type == SRB_TM_CMD) { +		qla24xx_tm_iocb_entry(vha, req, pkt); +		return; +	} +  	/* Fast path completion. */  	if (comp_status == CS_COMPLETE && scsi_status == 0) { -		qla2x00_do_host_ramp_up(vha);  		qla2x00_process_completed_request(vha, req, handle);  		return; @@ -2241,9 +2290,6 @@ out:  		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,  		    resid_len, fw_resid_len); -	if (!res) -		qla2x00_do_host_ramp_up(vha); -  	if (rsp->status_srb == NULL)  		sp->done(ha, sp, res);  } @@ -2384,6 +2430,23 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)  	}  } +static void +qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, +	struct abort_entry_24xx *pkt) +{ +	const char func[] = "ABT_IOCB"; +	srb_t *sp; +	struct srb_iocb *abt; + +	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); +	if (!sp) +		return; + +	abt = &sp->u.iocb_cmd; +	abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); +	sp->done(vha, sp, 0); +} +  /**   * qla24xx_process_response_queue() - Process response queue entries.   
 * @ha: SCSI driver HA context
@@ -2411,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 
 		if (pkt->entry_status != 0) {
 			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
-			(void)qlt_24xx_process_response_error(vha, pkt);
+			if (qlt_24xx_process_response_error(vha, pkt))
+				goto process_err;
+
 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
 			wmb();
 			continue;
 		}
+process_err:
+
 		switch (pkt->entry_type) {
 		case STATUS_TYPE:
@@ -2433,14 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			qla24xx_logio_entry(vha, rsp->req,
 			    (struct logio_entry_24xx *)pkt);
 			break;
-		case TSK_MGMT_IOCB_TYPE:
-			qla24xx_tm_iocb_entry(vha, rsp->req,
-			    (struct tsk_mgmt_entry *)pkt);
-			break;
-                case CT_IOCB_TYPE:
+		case CT_IOCB_TYPE:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
 			break;
-                case ELS_IOCB_TYPE:
+		case ELS_IOCB_TYPE:
 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
 			break;
 		case ABTS_RECV_24XX:
@@ -2449,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 		case ABTS_RESP_24XX:
 		case CTIO_TYPE7:
 		case NOTIFY_ACK_TYPE:
+		case CTIO_CRC2:
 			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
 			break;
 		case MARKER_TYPE:
@@ -2456,6 +2518,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 			 * from falling into default case
 			 */
 			break;
+		case ABORT_IOCB_TYPE:
+			qla24xx_abort_iocb_entry(vha, rsp->req,
+			    (struct abort_entry_24xx *)pkt);
+			break;
 		default:
 			/* Type Not Supported. */
 			ql_dbg(ql_dbg_async, vha, 0x5042,
@@ -2484,7 +2550,8 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+	    !IS_QLA27XX(ha))
 		return;
 
 	rval = QLA_SUCCESS;
@@ -2566,6 +2633,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->host_status);
+		if (qla2x00_check_reg_for_disconnect(vha, stat))
+			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
@@ -2635,6 +2704,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	struct device_reg_24xx __iomem *reg;
 	struct scsi_qla_host *vha;
 	unsigned long flags;
+	uint32_t stat = 0;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2648,11 +2718,19 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
+	/*
+	 * Use the host_status register to check for PCI disconnection
+	 * before we process the response queue.
+	 */
+	stat = RD_REG_DWORD(&reg->host_status);
+	if (qla2x00_check_reg_for_disconnect(vha, stat))
+		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	if (!ha->flags.disable_msix_handshake) {
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 		RD_REG_DWORD_RELAXED(&reg->hccr);
 	}
+out:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return IRQ_HANDLED;
@@ -2662,9 +2740,11 @@ static irqreturn_t
 qla25xx_msix_rsp_q(int irq, void *dev_id)
 {
 	struct qla_hw_data *ha;
+	scsi_qla_host_t *vha;
 	struct rsp_que *rsp;
 	struct device_reg_24xx __iomem *reg;
 	unsigned long flags;
+	uint32_t hccr = 0;
 
 	rsp = (struct rsp_que *) dev_id;
 	if (!rsp) {
@@ -2673,17 +2753,21 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 		return IRQ_NONE;
 	}
 	ha = rsp->hw;
+	vha = pci_get_drvdata(ha->pdev);
 
 	/* Clear the interrupt, if enabled, for this response queue */
 	if (!ha->flags.disable_msix_handshake) {
 		reg = &ha->iobase->isp24;
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
-		RD_REG_DWORD_RELAXED(&reg->hccr);
+		hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
+	if (qla2x00_check_reg_for_disconnect(vha, hccr))
+		goto out;
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
+out:
 	return IRQ_HANDLED;
 }
 
@@ -2714,6 +2798,8 @@ qla24xx_msix_default(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
+		if (qla2x00_check_reg_for_disconnect(vha, stat))
+			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
 				break;
@@ -2820,6 +2906,7 @@ static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
 #define MIN_MSIX_COUNT	2
+#define ATIO_VECTOR	2
 	int i, ret;
 	struct msix_entry *entries;
 	struct qla_msix_entry *qentry;
@@ -2876,36 +2963,49 @@ msix_failed:
 	}
 
 	/* Enable MSI-X vectors for the base queue */
-	for (i = 0; i < ha->msix_count; i++) {
+	for (i = 0; i < 2; i++) {
 		qentry = &ha->msix_entries[i];
-		if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
-			ret = request_irq(qentry->vector,
-				qla83xx_msix_entries[i].handler,
-				0, qla83xx_msix_entries[i].name, rsp);
-		} else if (IS_P3P_TYPE(ha)) {
+		if (IS_P3P_TYPE(ha))
 			ret = request_irq(qentry->vector,
 				qla82xx_msix_entries[i].handler,
 				0, qla82xx_msix_entries[i].name, rsp);
-		} else {
+		else
 			ret = request_irq(qentry->vector,
 				msix_entries[i].handler,
 				0, msix_entries[i].name, rsp);
-		}
-		if (ret) {
-			ql_log(ql_log_fatal, vha, 0x00cb,
-			    "MSI-X: unable to register handler -- %x/%d.\n",
-			    qentry->vector, ret);
-			qla24xx_disable_msix(ha);
-			ha->mqenable = 0;
-			goto msix_out;
-		}
+		if (ret)
+			goto msix_register_fail;
 		qentry->have_irq = 1;
 		qentry->rsp = rsp;
 		rsp->msix = qentry;
 	}
 
+	/*
+	 * If target mode is enabled, also request the vector for the ATIO
+	 * queue.
+	 */ +	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { +		qentry = &ha->msix_entries[ATIO_VECTOR]; +		ret = request_irq(qentry->vector, +			qla83xx_msix_entries[ATIO_VECTOR].handler, +			0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); +		qentry->have_irq = 1; +		qentry->rsp = rsp; +		rsp->msix = qentry; +	} + +msix_register_fail: +	if (ret) { +		ql_log(ql_log_fatal, vha, 0x00cb, +		    "MSI-X: unable to register handler -- %x/%d.\n", +		    qentry->vector, ret); +		qla24xx_disable_msix(ha); +		ha->mqenable = 0; +		goto msix_out; +	} +  	/* Enable MSI-X vector for response queue update for queue 0 */ -	if (IS_QLA83XX(ha)) { +	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  		if (ha->msixbase && ha->mqiobase &&  		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))  			ha->mqenable = 1; @@ -2928,13 +3028,14 @@ msix_out:  int  qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)  { -	int ret; -	device_reg_t __iomem *reg = ha->iobase; +	int ret = QLA_FUNCTION_FAILED; +	device_reg_t *reg = ha->iobase;  	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);  	/* If possible, enable MSI-X. */  	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && -		!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha)) +	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && +	    !IS_QLA27XX(ha))  		goto skip_msi;  	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && @@ -2962,12 +3063,15 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)  		    ha->chip_revision, ha->fw_attributes);  		goto clear_risc_ints;  	} -	ql_log(ql_log_info, vha, 0x0037, -	    "MSI-X Falling back-to MSI mode -%d.\n", ret); +  skip_msix: +	ql_log(ql_log_info, vha, 0x0037, +	    "Falling back-to MSI mode -%d.\n", ret); +  	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && -	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha)) +	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && +	    !IS_QLA27XX(ha))  		goto skip_msi;  	ret = pci_enable_msi(ha->pdev); @@ -2977,14 +3081,13 @@ skip_msix:  		ha->flags.msi_enabled = 1;  	} else  		ql_log(ql_log_warn, vha, 0x0039, -		    "MSI-X; Falling back-to INTa mode -- %d.\n", ret); +		    "Falling back-to INTa mode -- %d.\n", ret); +skip_msi:  	/* Skip INTx on ISP82xx. */  	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))  		return QLA_FUNCTION_FAILED; -skip_msi: -  	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,  	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,  	    QLA2XXX_DRIVER_NAME, rsp); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index a9aae500e79..1c33a77db5c 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
*/ @@ -35,7 +35,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)  {  	int		rval;  	unsigned long    flags = 0; -	device_reg_t __iomem *reg; +	device_reg_t *reg;  	uint8_t		abort_active;  	uint8_t		io_lock_on;  	uint16_t	command = 0; @@ -468,7 +468,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)  		mcp->mb[1] = MSW(risc_addr);  		mcp->mb[2] = LSW(risc_addr);  		mcp->mb[3] = 0; -		if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) { +		if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || +		    IS_QLA27XX(ha)) {  			struct nvram_81xx *nv = ha->nvram;  			mcp->mb[4] = (nv->enhanced_features &  			    EXTENDED_BB_CREDITS); @@ -539,6 +540,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)  		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;  	if (IS_FWI2_CAPABLE(ha))  		mcp->in_mb |= MBX_17|MBX_16|MBX_15; +	if (IS_QLA27XX(ha)) +		mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;  	mcp->flags = 0;  	mcp->tov = MBX_TOV_SECONDS;  	rval = qla2x00_mailbox_command(vha, mcp); @@ -574,6 +577,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)  		    "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",  		    __func__, mcp->mb[17], mcp->mb[16]);  	} +	if (IS_QLA27XX(ha)) { +		ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; +		ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; +	}  failed:  	if (rval != QLA_SUCCESS) { @@ -1214,7 +1221,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)  	mcp->mb[6] = MSW(MSD(ha->init_cb_dma));  	mcp->mb[7] = LSW(MSD(ha->init_cb_dma));  	mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; -	if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) { +	if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {  		mcp->mb[1] = BIT_0;  		mcp->mb[10] = MSW(ha->ex_init_cb_dma);  		mcp->mb[11] = LSW(ha->ex_init_cb_dma); @@ -1225,7 +1232,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)  	}  	/* 1 and 2 should normally be captured. */  	mcp->in_mb = MBX_2|MBX_1|MBX_0; -	if (IS_QLA83XX(ha)) +	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))  		/* mb3 is additional info about the installed SFP. 
*/  		mcp->in_mb  |= MBX_3;  	mcp->buf_size = size; @@ -1312,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)  		left = 0; -		list = kzalloc(dma_size, GFP_KERNEL); +		list = kmemdup(pmap, dma_size, GFP_KERNEL);  		if (!list) {  			ql_log(ql_log_warn, vha, 0x1140,  			    "%s(%ld): failed to allocate node names list " @@ -1321,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)  			goto out_free;  		} -		memcpy(list, pmap, dma_size);  restart:  		dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);  	} @@ -2349,7 +2355,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,  	mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;  	mcp->out_mb = MBX_0;  	mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; -	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) +	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))  		mcp->in_mb |= MBX_12;  	mcp->tov = MBX_TOV_SECONDS;  	mcp->flags = 0; @@ -2590,6 +2596,9 @@ qla24xx_abort_command(srb_t *sp)  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,  	    "Entered %s.\n", __func__); +	if (ql2xasynctmfenable) +		return qla24xx_async_abort_command(sp); +  	spin_lock_irqsave(&ha->hardware_lock, flags);  	for (handle = 1; handle < req->num_outstanding_cmds; handle++) {  		if (req->outstanding_cmds[handle] == sp) @@ -2634,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp)  		ql_dbg(ql_dbg_mbx, vha, 0x1090,  		    "Failed to complete IOCB -- completion status (%x).\n",  		    le16_to_cpu(abt->nport_handle)); -		rval = QLA_FUNCTION_FAILED; +		if (abt->nport_handle == CS_IOCB_ERROR) +			rval = QLA_FUNCTION_PARAMETER_ERROR; +		else +			rval = QLA_FUNCTION_FAILED;  	} else {  		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,  		    "Done %s.\n", __func__); @@ -2800,6 +2812,147 @@ qla2x00_system_error(scsi_qla_host_t *vha)  	return rval;  } +int +qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) +{ +	int rval; +	mbx_cmd_t mc; +	mbx_cmd_t *mcp = &mc; + +	if (!IS_QLA2031(vha->hw)) +		return QLA_FUNCTION_FAILED; + +	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, +	    "Entered %s.\n", __func__); + +	mcp->mb[0] = MBC_WRITE_SERDES; +	mcp->mb[1] = addr; +	mcp->mb[2] = data & 0xff; +	mcp->mb[3] = 0; +	mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; +	mcp->in_mb = MBX_0; +	mcp->tov = MBX_TOV_SECONDS; +	mcp->flags = 0; +	rval = qla2x00_mailbox_command(vha, mcp); + +	if (rval != QLA_SUCCESS) { +		ql_dbg(ql_dbg_mbx, vha, 0x1183, +		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); +	} else { +		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1184, +		    "Done %s.\n", __func__); +	} + +	return rval; +} + +int +qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) +{ +	int rval; +	mbx_cmd_t mc; +	mbx_cmd_t *mcp = &mc; + +	if (!IS_QLA2031(vha->hw)) +		return QLA_FUNCTION_FAILED; + +	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, +	    "Entered %s.\n", __func__); + +	mcp->mb[0] = MBC_READ_SERDES; +	mcp->mb[1] = addr; +	mcp->mb[3] = 0; +	mcp->out_mb = MBX_3|MBX_1|MBX_0; +	mcp->in_mb = MBX_1|MBX_0; +	mcp->tov = MBX_TOV_SECONDS; +	mcp->flags = 0; +	rval = qla2x00_mailbox_command(vha, mcp); + +	*data = mcp->mb[1] & 0xff; + +	if (rval != QLA_SUCCESS) { +		ql_dbg(ql_dbg_mbx, vha, 0x1186, +		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); +	} else { +		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1187, +		    "Done %s.\n", __func__); +	} + +	return rval; +} + +int +qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, 
uint32_t data) +{ +	int rval; +	mbx_cmd_t mc; +	mbx_cmd_t *mcp = &mc; + +	if (!IS_QLA8044(vha->hw)) +		return QLA_FUNCTION_FAILED; + +	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186, +	    "Entered %s.\n", __func__); + +	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; +	mcp->mb[1] = HCS_WRITE_SERDES; +	mcp->mb[3] = LSW(addr); +	mcp->mb[4] = MSW(addr); +	mcp->mb[5] = LSW(data); +	mcp->mb[6] = MSW(data); +	mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0; +	mcp->in_mb = MBX_0; +	mcp->tov = MBX_TOV_SECONDS; +	mcp->flags = 0; +	rval = qla2x00_mailbox_command(vha, mcp); + +	if (rval != QLA_SUCCESS) { +		ql_dbg(ql_dbg_mbx, vha, 0x1187, +		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); +	} else { +		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188, +		    "Done %s.\n", __func__); +	} + +	return rval; +} + +int +qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data) +{ +	int rval; +	mbx_cmd_t mc; +	mbx_cmd_t *mcp = &mc; + +	if (!IS_QLA8044(vha->hw)) +		return QLA_FUNCTION_FAILED; + +	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189, +	    "Entered %s.\n", __func__); + +	mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG; +	mcp->mb[1] = HCS_READ_SERDES; +	mcp->mb[3] = LSW(addr); +	mcp->mb[4] = MSW(addr); +	mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0; +	mcp->in_mb = MBX_2|MBX_1|MBX_0; +	mcp->tov = MBX_TOV_SECONDS; +	mcp->flags = 0; +	rval = qla2x00_mailbox_command(vha, mcp); + +	*data = mcp->mb[2] << 16 | mcp->mb[1]; + +	if (rval != QLA_SUCCESS) { +		ql_dbg(ql_dbg_mbx, vha, 0x118a, +		    "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); +	} else { +		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b, +		    "Done %s.\n", __func__); +	} + +	return rval; +} +  /**   * qla2x00_set_serdes_params() -   * @ha: HA context @@ -2963,7 +3116,7 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,  	    "Entered %s.\n", __func__);  	if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && -	    !IS_QLA83XX(vha->hw)) +	    !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))  		return QLA_FUNCTION_FAILED;  	if (unlikely(pci_channel_offline(vha->hw->pdev))) @@ -3581,6 +3734,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,  	    "Entered %s.\n", __func__); +	if (IS_SHADOW_REG_CAPABLE(ha)) +		req->options |= BIT_13; +  	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;  	mcp->mb[1] = req->options;  	mcp->mb[2] = MSW(LSD(req->dma)); @@ -3593,23 +3749,23 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)  	mcp->mb[12] = req->qos;  	mcp->mb[11] = req->vp_idx;  	mcp->mb[13] = req->rid; -	if (IS_QLA83XX(ha)) +	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))  		mcp->mb[15] = 0;  	mcp->mb[4] = req->id;  	/* que in ptr index */  	mcp->mb[8] = 0;  	/* que out ptr index */ -	mcp->mb[9] = 0; +	mcp->mb[9] = *req->out_ptr = 0;  	mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|  			MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;  	mcp->in_mb = MBX_0;  	mcp->flags = MBX_DMA_OUT;  	mcp->tov = MBX_TOV_SECONDS * 2; -	if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) +	if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))  		mcp->in_mb |= MBX_1; -	if (IS_QLA83XX(ha)) { +	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  		mcp->out_mb |= MBX_15;  		/* debug q create issue in SR-IOV */  		mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; @@ -3618,7 +3774,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)  	spin_lock_irqsave(&ha->hardware_lock, flags);  	if (!(req->options & BIT_0)) {  		WRT_REG_DWORD(req->req_q_in, 0); -		if (!IS_QLA83XX(ha)) +		if 
(!IS_QLA83XX(ha) && !IS_QLA27XX(ha))  			WRT_REG_DWORD(req->req_q_out, 0);  	}  	spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3647,6 +3803,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,  	    "Entered %s.\n", __func__); +	if (IS_SHADOW_REG_CAPABLE(ha)) +		rsp->options |= BIT_13; +  	mcp->mb[0] = MBC_INITIALIZE_MULTIQ;  	mcp->mb[1] = rsp->options;  	mcp->mb[2] = MSW(LSD(rsp->dma)); @@ -3656,12 +3815,12 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)  	mcp->mb[5] = rsp->length;  	mcp->mb[14] = rsp->msix->entry;  	mcp->mb[13] = rsp->rid; -	if (IS_QLA83XX(ha)) +	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))  		mcp->mb[15] = 0;  	mcp->mb[4] = rsp->id;  	/* que in ptr index */ -	mcp->mb[8] = 0; +	mcp->mb[8] = *rsp->in_ptr = 0;  	/* que out ptr index */  	mcp->mb[9] = 0;  	mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7 @@ -3673,7 +3832,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)  	if (IS_QLA81XX(ha)) {  		mcp->out_mb |= MBX_12|MBX_11|MBX_10;  		mcp->in_mb |= MBX_1; -	} else if (IS_QLA83XX(ha)) { +	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  		mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;  		mcp->in_mb |= MBX_1;  		/* debug q create issue in SR-IOV */ @@ -3740,7 +3899,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,  	    "Entered %s.\n", __func__); -	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) +	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && +	    !IS_QLA27XX(vha->hw))  		return QLA_FUNCTION_FAILED;  	mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; @@ -3771,7 +3931,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)  	mbx_cmd_t mc;  	mbx_cmd_t *mcp = &mc; -	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) +	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && +	    !IS_QLA27XX(vha->hw))  		return QLA_FUNCTION_FAILED;  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, @@ -3805,7 +3966,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)  	mbx_cmd_t mc;  	mbx_cmd_t *mcp = &mc; -	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) +	if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && +	    !IS_QLA27XX(vha->hw))  		return QLA_FUNCTION_FAILED;  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, @@ -4476,7 +4638,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)  	mcp->mb[1] = 0;  	mcp->out_mb = MBX_1|MBX_0;  	mcp->in_mb = MBX_2|MBX_1|MBX_0; -	if (IS_QLA83XX(ha)) +	if (IS_QLA83XX(ha) || IS_QLA27XX(ha))  		mcp->in_mb |= MBX_3;  	mcp->tov = MBX_TOV_SECONDS;  	mcp->flags = 0; @@ -4505,7 +4667,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,  	    "Entered %s.\n", __func__); -	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha)) +	if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && +	    !IS_QLA27XX(ha))  		return QLA_FUNCTION_FAILED;  	mcp->mb[0] = MBC_GET_PORT_CONFIG;  	mcp->out_mb = MBX_0; @@ -5001,7 +5164,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)  	mbx_cmd_t mc;  	mbx_cmd_t *mcp = &mc; -	if (!IS_QLA83XX(ha)) +	if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))  		return QLA_FUNCTION_FAILED;  	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, @@ -5076,7 +5239,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)  	struct qla_hw_data *ha = vha->hw;  	unsigned long retry_max_time = jiffies + (2 * HZ); -	if (!IS_QLA83XX(ha)) +	if 
(!IS_QLA83XX(ha) && !IS_QLA27XX(ha))  		return QLA_FUNCTION_FAILED;  	ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index a72df701fb3..89998244f48 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -630,7 +630,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,  	struct req_que *req = NULL;  	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);  	uint16_t que_id = 0; -	device_reg_t __iomem *reg; +	device_reg_t *reg;  	uint32_t cnt;  	req = kzalloc(sizeof(struct req_que), GFP_KERNEL); @@ -754,7 +754,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,  	struct rsp_que *rsp = NULL;  	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);  	uint16_t que_id = 0; -	device_reg_t __iomem *reg; +	device_reg_t *reg;  	rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);  	if (rsp == NULL) { diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 62ee7131b20..abeb3901498 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -40,7 +40,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)  {  	int		rval;  	unsigned long    flags = 0; -	device_reg_t __iomem *reg; +	device_reg_t *reg;  	uint8_t		abort_active;  	uint8_t		io_lock_on;  	uint16_t	command = 0; @@ -507,7 +507,7 @@ qlafx00_pci_config(scsi_qla_host_t *vha)  	pci_write_config_word(ha->pdev, PCI_COMMAND, w);  	/* PCIe -- adjust Maximum Read Request Size (2048). 
*/ -	if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP)) +	if (pci_is_pcie(ha->pdev))  		pcie_set_readrq(ha->pdev, 2048);  	ha->chip_revision = ha->pdev->revision; @@ -527,21 +527,63 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)  	struct qla_hw_data *ha = vha->hw;  	int i, core;  	uint32_t cnt; +	uint32_t reg_val; + +	spin_lock_irqsave(&ha->hardware_lock, flags); + +	QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0); +	QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0); + +	/* stop the XOR DMA engines */ +	QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02); +	QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02); +	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02); +	QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02); + +	/* stop the IDMA engines */ +	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840); +	reg_val &= ~(1<<12); +	QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val); + +	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844); +	reg_val &= ~(1<<12); +	QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val); + +	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848); +	reg_val &= ~(1<<12); +	QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val); + +	reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C); +	reg_val &= ~(1<<12); +	QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val); + +	for (i = 0; i < 100000; i++) { +		if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 && +		    (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0) +			break; +		udelay(100); +	}  	/* Set all 4 cores in reset */  	for (i = 0; i < 4; i++) {  		QLAFX00_SET_HBA_SOC_REG(ha,  		    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01)); -	} - -	/* Set all 4 core Clock gating control */ -	for (i = 0; i < 4; i++) {  		QLAFX00_SET_HBA_SOC_REG(ha,  		    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));  	}  	/* Reset all units in Fabric */ -	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101)); +	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101)); + +	/* */ +	QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1); +	QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0); + +	/* Set all 4 core Memory Power Down Registers */ +	for (i = 0; i < 5; i++) { +		QLAFX00_SET_HBA_SOC_REG(ha, +		    (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0)); +	}  	/* Reset all interrupt control registers */  	for (i = 0; i < 115; i++) { @@ -564,20 +606,19 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)  	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));  	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3)); -	spin_lock_irqsave(&ha->hardware_lock, flags); -  	/* Kick in Fabric units */  	QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));  	/* Kick in Core0 to start boot process */  	QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00)); +	spin_unlock_irqrestore(&ha->hardware_lock, flags); +  	/* Wait 10secs for soft-reset to complete. */  	for (cnt = 10; cnt; cnt--) {  		msleep(1000);  		barrier();  	} -	spin_unlock_irqrestore(&ha->hardware_lock, flags);  }  /** @@ -597,7 +638,6 @@ qlafx00_soft_reset(scsi_qla_host_t *vha)  	ha->isp_ops->disable_intrs(ha);  	qlafx00_soc_cpu_reset(vha); -	ha->isp_ops->enable_intrs(ha);  }  /** @@ -631,20 +671,6 @@ qlafx00_config_rings(struct scsi_qla_host *vha)  {  	struct qla_hw_data *ha = vha->hw;  	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; -	struct init_cb_fx *icb; -	struct req_que *req = ha->req_q_map[0]; -	struct rsp_que *rsp = ha->rsp_q_map[0]; - -	/* Setup ring parameters in initialization control block. 
*/ -	icb = (struct init_cb_fx *)ha->init_cb; -	icb->request_q_outpointer = __constant_cpu_to_le16(0); -	icb->response_q_inpointer = __constant_cpu_to_le16(0); -	icb->request_q_length = cpu_to_le16(req->length); -	icb->response_q_length = cpu_to_le16(rsp->length); -	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); -	icb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); -	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); -	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));  	WRT_REG_DWORD(®->req_q_in, 0);  	WRT_REG_DWORD(®->req_q_out, 0); @@ -660,10 +686,8 @@ char *  qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)  {  	struct qla_hw_data *ha = vha->hw; -	int pcie_reg; -	pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP); -	if (pcie_reg) { +	if (pci_is_pcie(ha->pdev)) {  		strcpy(str, "PCIe iSA");  		return str;  	} @@ -701,78 +725,16 @@ qlafx00_disable_intrs(struct qla_hw_data *ha)  	spin_unlock_irqrestore(&ha->hardware_lock, flags);  } -static void -qlafx00_tmf_iocb_timeout(void *data) -{ -	srb_t *sp = (srb_t *)data; -	struct srb_iocb *tmf = &sp->u.iocb_cmd; - -	tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT); -	complete(&tmf->u.tmf.comp); -} - -static void -qlafx00_tmf_sp_done(void *data, void *ptr, int res) -{ -	srb_t *sp = (srb_t *)ptr; -	struct srb_iocb *tmf = &sp->u.iocb_cmd; - -	complete(&tmf->u.tmf.comp); -} - -static int -qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, -		     uint32_t lun, uint32_t tag) -{ -	scsi_qla_host_t *vha = fcport->vha; -	struct srb_iocb *tm_iocb; -	srb_t *sp; -	int rval = QLA_FUNCTION_FAILED; - -	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); -	if (!sp) -		goto done; - -	tm_iocb = &sp->u.iocb_cmd; -	sp->type = SRB_TM_CMD; -	sp->name = "tmf"; -	qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); -	tm_iocb->u.tmf.flags = flags; -	tm_iocb->u.tmf.lun = lun; -	tm_iocb->u.tmf.data = tag; -	sp->done = qlafx00_tmf_sp_done; -	tm_iocb->timeout = qlafx00_tmf_iocb_timeout; -	init_completion(&tm_iocb->u.tmf.comp); - -	rval = qla2x00_start_sp(sp); -	if (rval != QLA_SUCCESS) -		goto done_free_sp; - -	ql_dbg(ql_dbg_async, vha, 0x507b, -	    "Task management command issued target_id=%x\n", -	    fcport->tgt_id); - -	wait_for_completion(&tm_iocb->u.tmf.comp); - -	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ? -	    QLA_SUCCESS : QLA_FUNCTION_FAILED; - -done_free_sp: -	sp->free(vha, sp); -done: -	return rval; -} -  int  qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)  { -	return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag); +	return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);  }  int  qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)  { -	return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag); +	return qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);  }  int @@ -999,6 +961,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)  			break;  		default: +			if ((aenmbx & 0xFF00) == MBA_FW_INIT_INPROGRESS) +				break; +  			/* If fw is apparently not ready. 
In order to continue,
 			 * we might need to issue Mbox cmd, but the problem is
 			 * that the DoorBell vector values that come with the
@@ -1612,6 +1577,22 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
 			ha->mr.fw_critemp_timer_tick--;
 		}
 	}
+	if (ha->mr.host_info_resend) {
+		/*
+		 * Incomplete host info might be sent to firmware
+		 * during system boot - info should be resent
+		 */
+		if (ha->mr.hinfo_resend_timer_tick == 0) {
+			ha->mr.host_info_resend = false;
+			set_bit(FX00_HOST_INFO_RESEND, &vha->dpc_flags);
+			ha->mr.hinfo_resend_timer_tick =
+			    QLAFX00_HINFO_RESEND_INTERVAL;
+			qla2xxx_wake_dpc(vha);
+		} else {
+			ha->mr.hinfo_resend_timer_tick--;
+		}
+	}
+
 }
 
 /*
@@ -1869,6 +1850,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 			goto done_free_sp;
 		}
 		break;
+	case FXDISC_ABORT_IOCTL:
 	default:
 		break;
 	}
@@ -1890,6 +1872,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 			    p_sysid->sysname, SYSNAME_LENGTH);
 			strncpy(phost_info->nodename,
 			    p_sysid->nodename, NODENAME_LENGTH);
+			if (!strcmp(phost_info->nodename, "(none)"))
+				ha->mr.host_info_resend = true;
 			strncpy(phost_info->release,
 			    p_sysid->release, RELEASE_LENGTH);
 			strncpy(phost_info->version,
@@ -1950,8 +1934,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 	if (fx_type == FXDISC_GET_CONFIG_INFO) {
 		struct config_info_data *pinfo =
 		    (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
-		memcpy(&vha->hw->mr.product_name, pinfo->product_name,
-		    sizeof(vha->hw->mr.product_name));
+		strcpy(vha->hw->model_number, pinfo->model_num);
+		strcpy(vha->hw->model_desc, pinfo->model_description);
 		memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
 		    sizeof(vha->hw->mr.symbolic_name));
 		memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
@@ -1995,7 +1979,12 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
 		ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
 		    (uint8_t *)pinfo, 16);
 		memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
-	}
+	} else if (fx_type == FXDISC_ABORT_IOCTL)
+		fdisc->u.fxiocb.result =
+		    (fdisc->u.fxiocb.result ==
+			cpu_to_le32(QLAFX00_IOCTL_ICOB_ABORT_SUCCESS)) ?
+		    cpu_to_le32(QLA_SUCCESS) : cpu_to_le32(QLA_FUNCTION_FAILED); +  	rval = le32_to_cpu(fdisc->u.fxiocb.result);  done_unmap_dma: @@ -2013,90 +2002,6 @@ done:  	return rval;  } -static void -qlafx00_abort_iocb_timeout(void *data) -{ -	srb_t *sp = (srb_t *)data; -	struct srb_iocb *abt = &sp->u.iocb_cmd; - -	abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT); -	complete(&abt->u.abt.comp); -} - -static void -qlafx00_abort_sp_done(void *data, void *ptr, int res) -{ -	srb_t *sp = (srb_t *)ptr; -	struct srb_iocb *abt = &sp->u.iocb_cmd; - -	complete(&abt->u.abt.comp); -} - -static int -qlafx00_async_abt_cmd(srb_t *cmd_sp) -{ -	scsi_qla_host_t *vha = cmd_sp->fcport->vha; -	fc_port_t *fcport = cmd_sp->fcport; -	struct srb_iocb *abt_iocb; -	srb_t *sp; -	int rval = QLA_FUNCTION_FAILED; - -	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); -	if (!sp) -		goto done; - -	abt_iocb = &sp->u.iocb_cmd; -	sp->type = SRB_ABT_CMD; -	sp->name = "abort"; -	qla2x00_init_timer(sp, FXDISC_TIMEOUT); -	abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; -	sp->done = qlafx00_abort_sp_done; -	abt_iocb->timeout = qlafx00_abort_iocb_timeout; -	init_completion(&abt_iocb->u.abt.comp); - -	rval = qla2x00_start_sp(sp); -	if (rval != QLA_SUCCESS) -		goto done_free_sp; - -	ql_dbg(ql_dbg_async, vha, 0x507c, -	    "Abort command issued - hdl=%x, target_id=%x\n", -	    cmd_sp->handle, fcport->tgt_id); - -	wait_for_completion(&abt_iocb->u.abt.comp); - -	rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? -	    QLA_SUCCESS : QLA_FUNCTION_FAILED; - -done_free_sp: -	sp->free(vha, sp); -done: -	return rval; -} - -int -qlafx00_abort_command(srb_t *sp) -{ -	unsigned long   flags = 0; - -	uint32_t	handle; -	fc_port_t	*fcport = sp->fcport; -	struct scsi_qla_host *vha = fcport->vha; -	struct qla_hw_data *ha = vha->hw; -	struct req_que *req = vha->req; - -	spin_lock_irqsave(&ha->hardware_lock, flags); -	for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) { -		if (req->outstanding_cmds[handle] == sp) -			break; -	} -	spin_unlock_irqrestore(&ha->hardware_lock, flags); -	if (handle == DEFAULT_OUTSTANDING_COMMANDS) { -		/* Command not found. */ -		return QLA_FUNCTION_FAILED; -	} -	return qlafx00_async_abt_cmd(sp); -} -  /*   * qlafx00_initialize_adapter   *      Initialize board. @@ -2125,7 +2030,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)  	vha->device_flags = DFLG_NO_CABLE;  	vha->dpc_flags = 0;  	vha->flags.management_server_logged_in = 0; -	vha->marker_needed = 0;  	ha->isp_abort_cnt = 0;  	ha->beacon_blink_led = 0; @@ -2329,8 +2233,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,  		fstatus.ioctl_flags = pkt->fw_iotcl_flags;  		fstatus.ioctl_data = pkt->dataword_r;  		fstatus.adapid = pkt->adapid; -		fstatus.adapid_hi = pkt->adapid_hi; -		fstatus.reserved_2 = pkt->reserved_1; +		fstatus.reserved_2 = pkt->dataword_r_extra;  		fstatus.res_count = pkt->residuallen;  		fstatus.status = pkt->status;  		fstatus.seq_number = pkt->seq_no; @@ -2421,7 +2324,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)  	/* Fast path completion. 
*/  	if (comp_status == CS_COMPLETE && scsi_status == 0) { -		qla2x00_do_host_ramp_up(vha);  		qla2x00_process_completed_request(vha, req, handle);  		return;  	} @@ -2632,9 +2534,6 @@ check_scsi_status:  		    rsp_info_len, resid_len, fw_resid_len, sense_len,  		    par_sense_len, rsp_info_len); -	if (!res) -		qla2x00_do_host_ramp_up(vha); -  	if (rsp->status_srb == NULL)  		sp->done(ha, sp, res);  } @@ -2783,7 +2682,7 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,  	srb_t *sp;  	struct qla_hw_data *ha = vha->hw;  	const char func[] = "ERROR-IOCB"; -	uint16_t que = MSW(pkt->handle); +	uint16_t que = 0;  	struct req_que *req = NULL;  	int res = DID_ERROR << 16; @@ -2812,16 +2711,22 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,  {  	struct sts_entry_fx00 *pkt;  	response_t *lptr; +	uint16_t lreq_q_in = 0; +	uint16_t lreq_q_out = 0; -	while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) != -	    RESPONSE_PROCESSED) { +	lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); +	lreq_q_out = rsp->ring_index; + +	while (lreq_q_in != lreq_q_out) {  		lptr = rsp->ring_ptr;  		memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,  		    sizeof(rsp->rsp_pkt));  		pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;  		rsp->ring_index++; +		lreq_q_out++;  		if (rsp->ring_index == rsp->length) { +			lreq_q_out = 0;  			rsp->ring_index = 0;  			rsp->ring_ptr = rsp->ring;  		} else { @@ -2833,7 +2738,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,  			qlafx00_error_entry(vha, rsp,  			    (struct sts_entry_fx00 *)pkt, pkt->entry_status,  			    pkt->entry_type); -			goto next_iter;  			continue;  		} @@ -2867,10 +2771,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,  			    pkt->entry_type, pkt->entry_status);  			break;  		} -next_iter: -		WRT_REG_DWORD((void __iomem *)&lptr->signature, -		    RESPONSE_PROCESSED); -		wmb();  	}  	/* Adjust ring index */ @@ -2905,9 +2805,9 @@ qlafx00_async_event(scsi_qla_host_t *vha)  		break;  	case QLAFX00_MBA_PORT_UPDATE:		/* Port database update */ -		ha->aenmb[1] = RD_REG_WORD(®->aenmailbox1); -		ha->aenmb[2] = RD_REG_WORD(®->aenmailbox2); -		ha->aenmb[3] = RD_REG_WORD(®->aenmailbox3); +		ha->aenmb[1] = RD_REG_DWORD(®->aenmailbox1); +		ha->aenmb[2] = RD_REG_DWORD(®->aenmailbox2); +		ha->aenmb[3] = RD_REG_DWORD(®->aenmailbox3);  		ql_dbg(ql_dbg_async, vha, 0x5077,  		    "Asynchronous port Update received "  		    "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", @@ -2964,7 +2864,7 @@ static void  qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)  {  	uint16_t	cnt; -	uint16_t __iomem *wptr; +	uint32_t __iomem *wptr;  	struct qla_hw_data *ha = vha->hw;  	struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; @@ -2974,10 +2874,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)  	/* Load return mailbox registers. 
*/  	ha->flags.mbox_int = 1;  	ha->mailbox_out32[0] = mb0; -	wptr = (uint16_t __iomem *)®->mailbox17; +	wptr = (uint32_t __iomem *)®->mailbox17;  	for (cnt = 1; cnt < ha->mbx_count; cnt++) { -		ha->mailbox_out32[cnt] = RD_REG_WORD(wptr); +		ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr);  		wptr++;  	}  } @@ -3004,6 +2904,7 @@ qlafx00_intr_handler(int irq, void *dev_id)  	struct rsp_que *rsp;  	unsigned long	flags;  	uint32_t clr_intr = 0; +	uint32_t intr_stat = 0;  	rsp = (struct rsp_que *) dev_id;  	if (!rsp) { @@ -3023,34 +2924,28 @@ qlafx00_intr_handler(int irq, void *dev_id)  	vha = pci_get_drvdata(ha->pdev);  	for (iter = 50; iter--; clr_intr = 0) {  		stat = QLAFX00_RD_INTR_REG(ha); -		if ((stat & QLAFX00_HST_INT_STS_BITS) == 0) +		if (qla2x00_check_reg_for_disconnect(vha, stat)) +			break; +		intr_stat = stat & QLAFX00_HST_INT_STS_BITS; +		if (!intr_stat)  			break; -		switch (stat & QLAFX00_HST_INT_STS_BITS) { -		case QLAFX00_INTR_MB_CMPLT: -		case QLAFX00_INTR_MB_RSP_CMPLT: -		case QLAFX00_INTR_MB_ASYNC_CMPLT: -		case QLAFX00_INTR_ALL_CMPLT: +		if (stat & QLAFX00_INTR_MB_CMPLT) {  			mb[0] = RD_REG_WORD(®->mailbox16);  			qlafx00_mbx_completion(vha, mb[0]);  			status |= MBX_INTERRUPT;  			clr_intr |= QLAFX00_INTR_MB_CMPLT; -			break; -		case QLAFX00_INTR_ASYNC_CMPLT: -		case QLAFX00_INTR_RSP_ASYNC_CMPLT: +		} +		if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) {  			ha->aenmb[0] = RD_REG_WORD(®->aenmailbox0);  			qlafx00_async_event(vha);  			clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; -			break; -		case QLAFX00_INTR_RSP_CMPLT: +		} +		if (intr_stat & QLAFX00_INTR_RSP_CMPLT) {  			qlafx00_process_response_queue(vha, rsp);  			clr_intr |= QLAFX00_INTR_RSP_CMPLT; -			break; -		default: -			ql_dbg(ql_dbg_async, vha, 0x507a, -			    "Unrecognized interrupt type (%d).\n", stat); -			break;  		} +  		QLAFX00_CLR_INTR_REG(ha, clr_intr);  		QLAFX00_RD_INTR_REG(ha);  	} @@ -3200,17 +3095,6 @@ qlafx00_start_scsi(srb_t *sp)  	/* So we know we haven't pci_map'ed anything yet */  	tot_dsds = 0; -	/* Forcing marker needed for now */ -	vha->marker_needed = 0; - -	/* Send marker if required */ -	if (vha->marker_needed != 0) { -		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != -		    QLA_SUCCESS) -			return QLA_FUNCTION_FAILED; -		vha->marker_needed = 0; -	} -  	/* Acquire ring specific lock */  	spin_lock_irqsave(&ha->hardware_lock, flags); @@ -3261,7 +3145,9 @@ qlafx00_start_scsi(srb_t *sp)  	memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);  	lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle); -	lcmd_pkt.handle_hi = 0; +	lcmd_pkt.reserved_0 = 0; +	lcmd_pkt.port_path_ctrl = 0; +	lcmd_pkt.reserved_1 = 0;  	lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);  	lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id); @@ -3341,8 +3227,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)  	tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;  	tm_iocb.entry_count = 1;  	tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); -	tm_iocb.handle_hi = 0; -	tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2); +	tm_iocb.reserved_0 = 0;  	tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);  	tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);  	if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) { @@ -3581,7 +3466,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)  	    sp->fcport->vha, 0x3047,  	    (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); -	memcpy((void *)pfxiocb, &fx_iocb, +	memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,  	    sizeof(struct 
fxdisc_entry_fx00));  	wmb();  } diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h index 79a93c52bae..aeaa1b40b1f 100644 --- a/drivers/scsi/qla2xxx/qla_mr.h +++ b/drivers/scsi/qla2xxx/qla_mr.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -22,13 +22,16 @@ struct cmd_type_7_fx00 {  	uint8_t entry_status;		/* Entry Status. */  	uint32_t handle;		/* System handle. */ -	uint32_t handle_hi; +	uint8_t reserved_0; +	uint8_t port_path_ctrl; +	uint16_t reserved_1;  	__le16 tgt_idx;		/* Target Idx. */  	uint16_t timeout;		/* Command timeout. */  	__le16 dseg_count;		/* Data segment count. */ -	uint16_t scsi_rsp_dsd_len; +	uint8_t	scsi_rsp_dsd_len; +	uint8_t reserved_2;  	struct scsi_lun lun;		/* LUN (LE). */ @@ -47,30 +50,6 @@ struct cmd_type_7_fx00 {  	uint32_t dseg_0_len;		/* Data segment 0 length. */  }; -/* - * ISP queue - marker entry structure definition. - */ -struct mrk_entry_fx00 { -	uint8_t entry_type;		/* Entry type. */ -	uint8_t entry_count;		/* Entry count. */ -	uint8_t handle_count;		/* Handle count. */ -	uint8_t entry_status;		/* Entry Status. */ - -	uint32_t handle;		/* System handle. */ -	uint32_t handle_hi;		/* System handle. */ - -	uint16_t tgt_id;		/* Target ID. */ - -	uint8_t modifier;		/* Modifier (7-0). */ -	uint8_t reserved_1; - -	uint8_t reserved_2[5]; - -	uint8_t lun[8];			/* FCP LUN (BE). */ -	uint8_t reserved_3[36]; -}; - -  #define	STATUS_TYPE_FX00	0x01		/* Status entry. */  struct sts_entry_fx00 {  	uint8_t entry_type;		/* Entry type. */ @@ -79,7 +58,7 @@ struct sts_entry_fx00 {  	uint8_t entry_status;		/* Entry Status. */  	uint32_t handle;		/* System handle. */ -	uint32_t handle_hi;		/* System handle. */ +	uint32_t reserved_3;		/* System handle. */  	__le16 comp_status;		/* Completion status. */  	uint16_t reserved_0;		/* OX_ID used by the firmware. */ @@ -102,7 +81,7 @@ struct sts_entry_fx00 {  struct multi_sts_entry_fx00 {  	uint8_t entry_type;		/* Entry type. */ -	uint8_t sys_define;		/* System defined. */ +	uint8_t entry_count;		/* Entry count. */  	uint8_t handle_count;  	uint8_t entry_status; @@ -118,15 +97,13 @@ struct tsk_mgmt_entry_fx00 {  	__le32 handle;		/* System handle. */ -	uint32_t handle_hi;		/* System handle. */ +	uint32_t reserved_0;  	__le16 tgt_id;		/* Target Idx. */  	uint16_t reserved_1; - -	uint16_t delay;			/* Activity delay in seconds. */ - -	__le16 timeout;		/* Command timeout. */ +	uint16_t reserved_3; +	uint16_t reserved_4;  	struct scsi_lun lun;		/* LUN (LE). */ @@ -144,13 +121,13 @@ struct abort_iocb_entry_fx00 {  	uint8_t entry_status;		/* Entry Status. */  	__le32 handle;		/* System handle. */ -	__le32 handle_hi;		/* System handle. */ +	__le32 reserved_0;  	__le16 tgt_id_sts;		/* Completion status. */  	__le16 options;  	__le32 abort_handle;		/* System handle. */ -	__le32 abort_handle_hi;	/* System handle. 
*/ +	__le32 reserved_2;  	__le16 req_que_no;  	uint8_t reserved_1[38]; @@ -171,8 +148,7 @@ struct ioctl_iocb_entry_fx00 {  	__le32 dataword_r;		/* Data word returned */  	uint32_t adapid;		/* Adapter ID */ -	uint32_t adapid_hi;		/* Adapter ID high */ -	uint32_t reserved_1; +	uint32_t dataword_r_extra;  	__le32 seq_no;  	uint8_t reserved_2[20]; @@ -304,7 +280,9 @@ struct register_host_info {  #define QLAFX00_TGT_NODE_LIST_SIZE (sizeof(uint32_t) * 32)  struct config_info_data { -	uint8_t		product_name[256]; +	uint8_t		model_num[16]; +	uint8_t		model_description[80]; +	uint8_t		reserved0[160];  	uint8_t		symbolic_name[64];  	uint8_t		serial_num[32];  	uint8_t		hw_version[16]; @@ -343,6 +321,7 @@ struct config_info_data {  #define FXDISC_GET_TGT_NODE_INFO	0x80  #define FXDISC_GET_TGT_NODE_LIST	0x81  #define FXDISC_REG_HOST_INFO		0x99 +#define FXDISC_ABORT_IOCTL		0xff  #define QLAFX00_HBA_ICNTRL_REG		0x20B08  #define QLAFX00_ICR_ENB_MASK            0x80000000 @@ -357,11 +336,7 @@ struct config_info_data {  #define QLAFX00_INTR_MB_CMPLT		0x1  #define QLAFX00_INTR_RSP_CMPLT		0x2 -#define QLAFX00_INTR_MB_RSP_CMPLT	0x3  #define QLAFX00_INTR_ASYNC_CMPLT	0x4 -#define QLAFX00_INTR_MB_ASYNC_CMPLT	0x5 -#define QLAFX00_INTR_RSP_ASYNC_CMPLT	0x6 -#define QLAFX00_INTR_ALL_CMPLT		0x7  #define QLAFX00_MBA_SYSTEM_ERR		0x8002  #define QLAFX00_MBA_TEMP_OVER		0x8005 @@ -376,6 +351,7 @@ struct config_info_data {  #define SOC_FABRIC_RST_CONTROL_REG       0x0020840  #define SOC_FABRIC_CONTROL_REG           0x0020200  #define SOC_FABRIC_CONFIG_REG            0x0020204 +#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG  0x001820C  #define SOC_INTERRUPT_SOURCE_I_CONTROL_REG     0x0020B00  #define SOC_CORE_TIMER_REG                     0x0021850 @@ -490,7 +466,6 @@ struct qla_mt_iocb_rsp_fx00 {  #define FX00_DEF_RATOV	10  struct mr_data_fx00 { -	uint8_t	product_name[256];  	uint8_t	symbolic_name[64];  	uint8_t	serial_num[32];  	uint8_t	hw_version[16]; @@ -511,6 +486,8 @@ struct mr_data_fx00 {  	uint32_t old_aenmbx0_state;  	uint32_t critical_temperature;  	bool extended_io_enabled; +	bool host_info_resend; +	uint8_t hinfo_resend_timer_tick;  };  #define QLAFX00_EXTENDED_IO_EN_MASK    0x20 @@ -537,7 +514,14 @@ struct mr_data_fx00 {  #define QLAFX00_RESET_INTERVAL		120	/* number of seconds */  #define QLAFX00_MAX_RESET_INTERVAL	600	/* number of seconds */  #define QLAFX00_CRITEMP_INTERVAL	60	/* number of seconds */ +#define QLAFX00_HINFO_RESEND_INTERVAL	60	/* number of seconds */  #define QLAFX00_CRITEMP_THRSHLD		80	/* Celsius degrees */ +/* Max conncurrent IOs that can be queued */ +#define QLAFX00_MAX_CANQUEUE		1024 + +/* IOCTL IOCB abort success */ +#define QLAFX00_IOCTL_ICOB_ABORT_SUCCESS	0x68 +  #endif diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 11ce53dcbe7..58f3c912d96 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
*/ @@ -848,6 +848,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)  {  	int done = 0, timeout = 0;  	uint32_t lock_owner = 0; +	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);  	while (!done) {  		/* acquire semaphore2 from PCI HW block */ @@ -856,17 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha)  			break;  		if (timeout >= qla82xx_rom_lock_timeout) {  			lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID); +			ql_log(ql_log_warn, vha, 0xb157, +			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", +			    __func__, ha->portnum, lock_owner);  			return -1;  		}  		timeout++;  	} -	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER); +	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);  	return 0;  }  static void  qla82xx_rom_unlock(struct qla_hw_data *ha)  { +	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);  	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));  } @@ -950,6 +955,7 @@ static int  qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)  {  	int ret, loops = 0; +	uint32_t lock_owner = 0;  	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);  	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { @@ -958,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)  		loops++;  	}  	if (loops >= 50000) { +		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);  		ql_log(ql_log_fatal, vha, 0x00b9, -		    "Failed to acquire SEM2 lock.\n"); +		    "Failed to acquire SEM2 lock, Lock Owner %u.\n", +		    lock_owner);  		return -1;  	}  	ret = qla82xx_do_rom_fast_read(ha, addr, valp); @@ -1057,6 +1065,7 @@ static int  ql82xx_rom_lock_d(struct qla_hw_data *ha)  {  	int loops = 0; +	uint32_t lock_owner = 0;  	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);  	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) { @@ -1065,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)  		loops++;  	}  	if (loops >= 50000) { +		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);  		ql_log(ql_log_warn, vha, 0xb010, -		    "ROM lock failed.\n"); +		    "ROM lock failed, Lock Owner %u.\n", lock_owner);  		return -1;  	}  	return 0; @@ -1664,10 +1674,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)  	/* Mapping of IO base pointer */  	if (IS_QLA8044(ha)) {  		ha->iobase = -		    (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase); +		    (device_reg_t *)((uint8_t *)ha->nx_pcibase);  	} else if (IS_QLA82XX(ha)) {  		ha->iobase = -		    (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + +		    (device_reg_t *)((uint8_t *)ha->nx_pcibase +  			0xbc000 + (ha->pdev->devfn << 11));  	} @@ -2096,6 +2106,7 @@ qla82xx_msix_default(int irq, void *dev_id)  	int status = 0;  	unsigned long flags;  	uint32_t stat = 0; +	uint32_t host_int = 0;  	uint16_t mb[4];  	rsp = (struct rsp_que *) dev_id; @@ -2111,7 +2122,10 @@ qla82xx_msix_default(int irq, void *dev_id)  	spin_lock_irqsave(&ha->hardware_lock, flags);  	vha = pci_get_drvdata(ha->pdev);  	do { -		if (RD_REG_DWORD(®->host_int)) { +		host_int = RD_REG_DWORD(®->host_int); +		if (qla2x00_check_reg_for_disconnect(vha, host_int)) +			break; +		if (host_int) {  			stat = RD_REG_DWORD(®->host_status);  			switch (stat & 0xff) { @@ -2156,6 +2170,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)  	struct rsp_que *rsp;  	struct device_reg_82xx __iomem *reg;  	unsigned long flags; +	uint32_t host_int = 0;  	rsp = (struct rsp_que *) dev_id;  	if (!rsp) { @@ -2168,8 +2183,12 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)  	reg = &ha->iobase->isp82;  	
spin_lock_irqsave(&ha->hardware_lock, flags);  	vha = pci_get_drvdata(ha->pdev); +	host_int = RD_REG_DWORD(®->host_int); +	if (qla2x00_check_reg_for_disconnect(vha, host_int)) +		goto out;  	qla24xx_process_response_queue(vha, rsp);  	WRT_REG_DWORD(®->host_int, 0); +out:  	spin_unlock_irqrestore(&ha->hardware_lock, flags);  	return IRQ_HANDLED;  } @@ -2183,6 +2202,7 @@ qla82xx_poll(int irq, void *dev_id)  	struct device_reg_82xx __iomem *reg;  	int status = 0;  	uint32_t stat; +	uint32_t host_int = 0;  	uint16_t mb[4];  	unsigned long flags; @@ -2198,7 +2218,10 @@ qla82xx_poll(int irq, void *dev_id)  	spin_lock_irqsave(&ha->hardware_lock, flags);  	vha = pci_get_drvdata(ha->pdev); -	if (RD_REG_DWORD(®->host_int)) { +	host_int = RD_REG_DWORD(®->host_int); +	if (qla2x00_check_reg_for_disconnect(vha, host_int)) +		goto out; +	if (host_int) {  		stat = RD_REG_DWORD(®->host_status);  		switch (stat & 0xff) {  		case 0x1: @@ -2224,8 +2247,9 @@ qla82xx_poll(int irq, void *dev_id)  			    stat * 0xff);  			break;  		} +		WRT_REG_DWORD(®->host_int, 0);  	} -	WRT_REG_DWORD(®->host_int, 0); +out:  	spin_unlock_irqrestore(&ha->hardware_lock, flags);  } @@ -2797,12 +2821,14 @@ static void  qla82xx_rom_lock_recovery(struct qla_hw_data *ha)  {  	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); +	uint32_t lock_owner = 0; -	if (qla82xx_rom_lock(ha)) +	if (qla82xx_rom_lock(ha)) { +		lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);  		/* Someone else is holding the lock. */  		ql_log(ql_log_info, vha, 0xb022, -		    "Resetting rom_lock.\n"); - +		    "Resetting rom_lock, Lock Owner %u.\n", lock_owner); +	}  	/*  	 * Either we got the lock, or someone  	 * else died while holding it. @@ -2826,47 +2852,30 @@ static int  qla82xx_device_bootstrap(scsi_qla_host_t *vha)  {  	int rval = QLA_SUCCESS; -	int i, timeout; +	int i;  	uint32_t old_count, count;  	struct qla_hw_data *ha = vha->hw; -	int need_reset = 0, peg_stuck = 1; +	int need_reset = 0;  	need_reset = qla82xx_need_reset(ha); -	old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); - -	for (i = 0; i < 10; i++) { -		timeout = msleep_interruptible(200); -		if (timeout) { -			qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, -				QLA8XXX_DEV_FAILED); -			return QLA_FUNCTION_FAILED; -		} - -		count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); -		if (count != old_count) -			peg_stuck = 0; -	} -  	if (need_reset) {  		/* We are trying to perform a recovery here. */ -		if (peg_stuck) +		if (ha->flags.isp82xx_fw_hung)  			qla82xx_rom_lock_recovery(ha); -		goto dev_initialize;  	} else  { -		/* Start of day for this ha context. */ -		if (peg_stuck) { -			/* Either we are the first or recovery in progress. */ -			qla82xx_rom_lock_recovery(ha); -			goto dev_initialize; -		} else -			/* Firmware already running. 
*/ -			goto dev_ready; +		old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); +		for (i = 0; i < 10; i++) { +			msleep(200); +			count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); +			if (count != old_count) { +				rval = QLA_SUCCESS; +				goto dev_ready; +			} +		} +		qla82xx_rom_lock_recovery(ha);  	} -	return rval; - -dev_initialize:  	/* set to DEV_INITIALIZING */  	ql_log(ql_log_info, vha, 0x009e,  	    "HW State: INITIALIZING.\n"); @@ -3003,7 +3012,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)  		qla82xx_clear_drv_active(ha);  		qla82xx_idc_unlock(ha);  	} else if (IS_QLA8044(ha)) { -		qla8044_clear_drv_active(vha); +		qla8044_clear_drv_active(ha);  		qla8044_idc_unlock(ha);  	} @@ -3128,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)  	if (ql2xmdenable) {  		if (!ha->fw_dumped) { -			if (fw_major_version != ha->fw_major_version || +			if ((fw_major_version != ha->fw_major_version ||  			    fw_minor_version != ha->fw_minor_version || -			    fw_subminor_version != ha->fw_subminor_version) { +			    fw_subminor_version != ha->fw_subminor_version) || +			    (ha->prev_minidump_failed)) {  				ql_dbg(ql_dbg_p3p, vha, 0xb02d, -				    "Firmware version differs " -				    "Previous version: %d:%d:%d - " -				    "New version: %d:%d:%d\n", +				    "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",  				    fw_major_version, fw_minor_version,  				    fw_subminor_version,  				    ha->fw_major_version,  				    ha->fw_minor_version, -				    ha->fw_subminor_version); +				    ha->fw_subminor_version, +				    ha->prev_minidump_failed);  				/* Release MiniDump resources */  				qla82xx_md_free(vha);  				/* ALlocate MiniDump resources */ @@ -3668,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)  			for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {  				sp = req->outstanding_cmds[cnt];  				if (sp) { -					if (!sp->u.scmd.ctx || -					    (sp->flags & SRB_FCP_CMND_DMA_VALID)) { +					if ((!sp->u.scmd.ctx || +					    (sp->flags & +						SRB_FCP_CMND_DMA_VALID)) && +						!ha->flags.isp82xx_fw_hung) {  						spin_unlock_irqrestore(  						    &ha->hardware_lock, flags);  						if (ha->isp_ops->abort_command(sp)) { @@ -4488,3 +4499,20 @@ exit:  	qla82xx_idc_unlock(ha);  	return rval;  } + +void +qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) +{ +	struct qla_hw_data *ha = vha->hw; + +	if (!ha->allow_cna_fw_dump) +		return; + +	scsi_block_requests(vha->host); +	ha->flags.isp82xx_no_md_cap = 1; +	qla82xx_idc_lock(ha); +	qla82xx_set_reset_owner(vha); +	qla82xx_idc_unlock(ha); +	qla2x00_wait_for_chip_reset(vha); +	scsi_unblock_requests(vha->host); +} diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 1bb93dbbccb..59c477883a7 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
 */
@@ -333,9 +333,6 @@
 #define QLA82XX_ROMUSB_ROM_INSTR_OPCODE		(ROMUSB_ROM + 0x0004)
 #define QLA82XX_ROMUSB_GLB_CAS_RST		(ROMUSB_GLB + 0x0038)
 
-/* Lock IDs for ROM lock */
-#define ROM_LOCK_DRIVER       0x0d417340
-
 #define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000	 /* all are 1MB windows */
 #define QLA82XX_PCI_CRB_WINDOW(A) \
 	(QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
@@ -1186,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
 
 #define qla82xx_get_temp_val(x)          ((x) >> 16)
+#define qla82xx_get_temp_val1(x)          ((x) & 0x0000FFFF)
 #define qla82xx_get_temp_state(x)        ((x) & 0xffff)
 #define qla82xx_encode_temp(val, state)  (((val) << 16) | (state))
 
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 8164cc9e728..da9e3902f21 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1,17 +1,20 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
 #include <linux/vmalloc.h>
+#include <linux/delay.h>
 
 #include "qla_def.h"
 #include "qla_gbl.h"
 
 #include <linux/delay.h>
 
+#define TIMEOUT_100_MS 100
+
 /* 8044 Flash Read/Write functions */
 uint32_t
 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
@@ -117,6 +120,95 @@ qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
 	qla8044_wr_reg_indirect(vha, waddr, value);
 }
 
+static int
+qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
+	uint32_t mask)
+{
+	unsigned long timeout;
+	uint32_t temp;
+
+	/* jiffies after 100ms */
+	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+	do {
+		qla8044_rd_reg_indirect(vha, addr1, &temp);
+		if ((temp & mask) != 0)
+			break;
+		if (time_after_eq(jiffies, timeout)) {
+			ql_log(ql_log_warn, vha, 0xb151,
+				"Error in processing rdmdio entry\n");
+			return -1;
+		}
+	} while (1);
+
+	return 0;
+}
+
+static uint32_t
+qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
+	uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
+{
+	uint32_t temp;
+	int ret = 0;
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return -1;
+
+	temp = (0x40000000 | addr);
+	qla8044_wr_reg_indirect(vha, addr1, temp);
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return 0;
+
+	qla8044_rd_reg_indirect(vha, addr3, &temp);
+
+	return temp;
+}
+
+
+static int
+qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
+	uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
+{
+	unsigned long timeout;
+	uint32_t temp;
+
+	/* jiffies after 100 msecs */
+	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+	do {
+		temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
+		if ((temp & 0x1) != 1)
+			break;
+		if (time_after_eq(jiffies, timeout)) {
+			ql_log(ql_log_warn, vha, 0xb152,
+			    "Error in processing mdiobus idle\n");
+			return -1;
+		}
+	} while (1);
+
+	return 0;
+}
+
+static int
+qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
+	uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
+{
+	int ret = 0;
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return -1;
+
+	qla8044_wr_reg_indirect(vha, addr3, value);
+	qla8044_wr_reg_indirect(vha, addr1, addr);
+
+	ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+	if (ret == -1)
+		return -1;
+
+	return 0;
+}  /*   * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,   * Shift Left,Right/OR/XOR with values RMW header and write value to waddr. @@ -356,8 +448,8 @@ qla8044_flash_lock(scsi_qla_host_t *vha)  			lock_owner = qla8044_rd_reg(ha,  			    QLA8044_FLASH_LOCK_ID);  			ql_log(ql_log_warn, vha, 0xb113, -			    "%s: flash lock by %d failed, held by %d\n", -				__func__, ha->portnum, lock_owner); +			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d", +			    __func__, ha->portnum, lock_owner);  			ret_val = QLA_FUNCTION_FAILED;  			break;  		} @@ -1257,10 +1349,10 @@ exit_start_fw:  }  void -qla8044_clear_drv_active(struct scsi_qla_host *vha) +qla8044_clear_drv_active(struct qla_hw_data *ha)  {  	uint32_t drv_active; -	struct qla_hw_data *ha = vha->hw; +	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);  	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);  	drv_active &= ~(1 << (ha->portnum)); @@ -1324,7 +1416,7 @@ qla8044_device_bootstrap(struct scsi_qla_host *vha)  	if (rval != QLA_SUCCESS) {  		ql_log(ql_log_info, vha, 0xb0b3,  		     "%s: HW State: FAILED\n", __func__); -		qla8044_clear_drv_active(vha); +		qla8044_clear_drv_active(ha);  		qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,  		    QLA8XXX_DEV_FAILED);  		return rval; @@ -1541,7 +1633,7 @@ static void  qla8044_need_reset_handler(struct scsi_qla_host *vha)  {  	uint32_t dev_state = 0, drv_state, drv_active; -	unsigned long reset_timeout, dev_init_timeout; +	unsigned long reset_timeout;  	struct qla_hw_data *ha = vha->hw;  	ql_log(ql_log_fatal, vha, 0xb0c2, @@ -1555,83 +1647,78 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)  		qla8044_idc_lock(ha);  	} -	if (!ha->flags.nic_core_reset_owner) { -		ql_dbg(ql_dbg_p3p, vha, 0xb0c3, -		    "%s(%ld): reset acknowledged\n", -		    __func__, vha->host_no); -		qla8044_set_rst_ready(vha); +	dev_state = qla8044_rd_direct(vha, +	    QLA8044_CRB_DEV_STATE_INDEX); +	drv_state = qla8044_rd_direct(vha, +	    QLA8044_CRB_DRV_STATE_INDEX); +	drv_active = qla8044_rd_direct(vha, +	    QLA8044_CRB_DRV_ACTIVE_INDEX); -		/* Non-reset owners ACK Reset and wait for device INIT state -		 * as part of Reset Recovery by Reset Owner -		 */ -		dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); +	ql_log(ql_log_info, vha, 0xb0c5, +	    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n", +	    __func__, vha->host_no, drv_state, drv_active, dev_state); -		do { -			if (time_after_eq(jiffies, dev_init_timeout)) { -				ql_log(ql_log_info, vha, 0xb0c4, -				    "%s: Non Reset owner DEV INIT " -				    "TIMEOUT!\n", __func__); -				break; -			} +	qla8044_set_rst_ready(vha); -			qla8044_idc_unlock(ha); -			msleep(1000); -			qla8044_idc_lock(ha); +	/* wait for 10 seconds for reset ack from all functions */ +	reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); -			dev_state = qla8044_rd_direct(vha, -					QLA8044_CRB_DEV_STATE_INDEX); -		} while (dev_state == QLA8XXX_DEV_NEED_RESET); -	} else { -		qla8044_set_rst_ready(vha); +	do { +		if (time_after_eq(jiffies, reset_timeout)) { +			ql_log(ql_log_info, vha, 0xb0c4, +			    "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n", +			    __func__, ha->portnum, drv_state, drv_active); +			break; +		} -		/* wait for 10 seconds for reset ack from all functions */ -		reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ); +		qla8044_idc_unlock(ha); +		msleep(1000); +		qla8044_idc_lock(ha); +		dev_state = qla8044_rd_direct(vha, +		   
 QLA8044_CRB_DEV_STATE_INDEX);  		drv_state = qla8044_rd_direct(vha,  		    QLA8044_CRB_DRV_STATE_INDEX);  		drv_active = qla8044_rd_direct(vha,  		    QLA8044_CRB_DRV_ACTIVE_INDEX); - -		ql_log(ql_log_info, vha, 0xb0c5, -		    "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", -		    __func__, vha->host_no, drv_state, drv_active); - -		while (drv_state != drv_active) { -			if (time_after_eq(jiffies, reset_timeout)) { -				ql_log(ql_log_info, vha, 0xb0c6, -				    "%s: RESET TIMEOUT!" -				    "drv_state: 0x%08x, drv_active: 0x%08x\n", -				    QLA2XXX_DRIVER_NAME, drv_state, drv_active); -				break; -			} - -			qla8044_idc_unlock(ha); -			msleep(1000); -			qla8044_idc_lock(ha); - -			drv_state = qla8044_rd_direct(vha, -			    QLA8044_CRB_DRV_STATE_INDEX); -			drv_active = qla8044_rd_direct(vha, -			    QLA8044_CRB_DRV_ACTIVE_INDEX); -		} - -		if (drv_state != drv_active) { -			ql_log(ql_log_info, vha, 0xb0c7, -			    "%s(%ld): Reset_owner turning off drv_active " -			    "of non-acking function 0x%x\n", __func__, -			    vha->host_no, (drv_active ^ drv_state)); -			drv_active = drv_active & drv_state; -			qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, -			    drv_active); +	} while (((drv_state & drv_active) != drv_active) && +	    (dev_state == QLA8XXX_DEV_NEED_RESET)); + +	/* Remove IDC participation of functions not acknowledging */ +	if (drv_state != drv_active) { +		ql_log(ql_log_info, vha, 0xb0c7, +		    "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n", +		    __func__, vha->host_no, ha->portnum, +		    (drv_active ^ drv_state)); +		drv_active = drv_active & drv_state; +		qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, +		    drv_active); +	} else { +		/* +		 * Reset owner should execute reset recovery, +		 * if all functions acknowledged +		 */ +		if ((ha->flags.nic_core_reset_owner) && +		    (dev_state == QLA8XXX_DEV_NEED_RESET)) { +			ha->flags.nic_core_reset_owner = 0; +			qla8044_device_bootstrap(vha); +			return;  		} +	} -		/* -		* Clear RESET OWNER, will be set at next reset -		* by next RST_OWNER -		*/ +	/* Exit if non active function */ +	if (!(drv_active & (1 << ha->portnum))) {  		ha->flags.nic_core_reset_owner = 0; +		return; +	} -		/* Start Reset Recovery */ +	/* +	 * Execute Reset Recovery if Reset Owner or Function 7 +	 * is the only active function +	 */ +	if (ha->flags.nic_core_reset_owner || +	    ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) { +		ha->flags.nic_core_reset_owner = 0;  		qla8044_device_bootstrap(vha);  	}  } @@ -1654,6 +1741,19 @@ qla8044_set_drv_active(struct scsi_qla_host *vha)  	qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);  } +static int +qla8044_check_drv_active(struct scsi_qla_host *vha) +{ +	uint32_t drv_active; +	struct qla_hw_data *ha = vha->hw; + +	drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX); +	if (drv_active & (1 << ha->portnum)) +		return QLA_SUCCESS; +	else +		return QLA_TEST_FAILED; +} +  static void  qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)  { @@ -1736,7 +1836,7 @@ qla8044_update_idc_reg(struct scsi_qla_host *vha)  	rval = qla8044_set_idc_ver(vha);  	if (rval == QLA_FUNCTION_FAILED) -		qla8044_clear_drv_active(vha); +		qla8044_clear_drv_active(ha);  	qla8044_idc_unlock(ha);  exit_update_idc_reg: @@ -1836,14 +1936,16 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)  	while (1) {  		if (time_after_eq(jiffies, dev_init_timeout)) { -			ql_log(ql_log_warn, vha, 0xb0cf, -			    "%s: Device Init Failed 0x%x = %s\n", -			    
QLA2XXX_DRIVER_NAME, dev_state, -			    dev_state < MAX_STATES ? -			    qdev_state(dev_state) : "Unknown"); - -			qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, -			    QLA8XXX_DEV_FAILED); +			if (qla8044_check_drv_active(vha) == QLA_SUCCESS) { +				ql_log(ql_log_warn, vha, 0xb0cf, +				    "%s: Device Init Failed 0x%x = %s\n", +				    QLA2XXX_DRIVER_NAME, dev_state, +				    dev_state < MAX_STATES ? +				    qdev_state(dev_state) : "Unknown"); +				qla8044_wr_direct(vha, +				    QLA8044_CRB_DEV_STATE_INDEX, +				    QLA8XXX_DEV_FAILED); +			}  		}  		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); @@ -1859,7 +1961,7 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)  			goto exit;  		case QLA8XXX_DEV_COLD:  			rval = qla8044_device_bootstrap(vha); -			goto exit; +			break;  		case QLA8XXX_DEV_INITIALIZING:  			qla8044_idc_unlock(ha);  			msleep(1000); @@ -2013,11 +2115,16 @@ qla8044_watchdog(struct scsi_qla_host *vha)  	/* don't poll if reset is going on or FW hang in quiescent state */  	if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || -	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || -	    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||  	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {  		dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX); +		if (qla8044_check_fw_alive(vha)) { +			ha->flags.isp82xx_fw_hung = 1; +			ql_log(ql_log_warn, vha, 0xb10a, +			    "Firmware hung.\n"); +			qla82xx_clear_pending_mbx(vha); +		} +  		if (qla8044_check_temp(vha)) {  			set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);  			ha->flags.isp82xx_fw_hung = 1; @@ -2038,7 +2145,7 @@ qla8044_watchdog(struct scsi_qla_host *vha)  			qla2xxx_wake_dpc(vha);  		} else  {  			/* Check firmware health */ -			if (qla8044_check_fw_alive(vha)) { +			if (ha->flags.isp82xx_fw_hung) {  				halt_status = qla8044_rd_direct(vha,  					QLA8044_PEG_HALT_STATUS1_INDEX);  				if (halt_status & @@ -2074,12 +2181,8 @@ qla8044_watchdog(struct scsi_qla_host *vha)  						    __func__);  						set_bit(ISP_ABORT_NEEDED,  						    &vha->dpc_flags); -						qla82xx_clear_pending_mbx(vha);  					}  				} -				ha->flags.isp82xx_fw_hung = 1; -				ql_log(ql_log_warn, vha, 0xb10a, -				    "Firmware hung.\n");  				qla2xxx_wake_dpc(vha);  			}  		} @@ -2253,7 +2356,7 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,  	if (r_addr & 0xf) {  		ql_dbg(ql_dbg_p3p, vha, 0xb0f1, -		    "[%s]: Read addr 0x%x not 16 bytes alligned\n", +		    "[%s]: Read addr 0x%x not 16 bytes aligned\n",  		    __func__, r_addr);  		return QLA_FUNCTION_FAILED;  	} @@ -2287,8 +2390,6 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,  		}  		if (j >= MAX_CTL_CHECK) { -			printk_ratelimited(KERN_ERR -			    "%s: failed to read through agent\n", __func__);  			write_unlock_irqrestore(&ha->hw_lock, flags);  			return QLA_SUCCESS;  		} @@ -2883,6 +2984,231 @@ error_exit:  	return rval;  } +static uint32_t +qla8044_minidump_process_rddfe(struct scsi_qla_host *vha, +	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ +	int loop_cnt; +	uint32_t addr1, addr2, value, data, temp, wrVal; +	uint8_t stride, stride2; +	uint16_t count; +	uint32_t poll, mask, data_size, modify_mask; +	uint32_t wait_count = 0; + +	uint32_t *data_ptr = *d_ptr; + +	struct qla8044_minidump_entry_rddfe *rddfe; +	rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr; + +	addr1 = rddfe->addr_1; +	value = rddfe->value; +	stride = rddfe->stride; +	stride2 = rddfe->stride2; +	count = rddfe->count; + +	poll = 
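Two behavioral points in the hunks above: qla8044_device_state_handler() now marks the device FAILED on an init timeout only when this function is still listed in DRV_ACTIVE (the new qla8044_check_drv_active()), and the QLA8XXX_DEV_COLD case breaks out of the switch instead of exiting, so the state machine re-reads the device state after bootstrap. In the watchdog, the firmware-liveness probe now runs once per tick up front, setting isp82xx_fw_hung and clearing pending mailbox commands, and the later halt-status path keys off that flag rather than probing a second time. Condensed shape of the probe-once pattern (restating the logic above, not new code):

	if (qla8044_check_fw_alive(vha)) {
		ha->flags.isp82xx_fw_hung = 1;		/* set once per tick */
		qla82xx_clear_pending_mbx(vha);
	}
	/* ... temperature / heartbeat checks ... */
	if (ha->flags.isp82xx_fw_hung)			/* consume the flag */
		halt_status = qla8044_rd_direct(vha,
		    QLA8044_PEG_HALT_STATUS1_INDEX);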
rddfe->poll; +	mask = rddfe->mask; +	modify_mask = rddfe->modify_mask; +	data_size = rddfe->data_size; + +	addr2 = addr1 + stride; + +	for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { +		qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value)); + +		wait_count = 0; +		while (wait_count < poll) { +			qla8044_rd_reg_indirect(vha, addr1, &temp); +			if ((temp & mask) != 0) +				break; +			wait_count++; +		} + +		if (wait_count == poll) { +			ql_log(ql_log_warn, vha, 0xb153, +			    "%s: TIMEOUT\n", __func__); +			goto error; +		} else { +			qla8044_rd_reg_indirect(vha, addr2, &temp); +			temp = temp & modify_mask; +			temp = (temp | ((loop_cnt << 16) | loop_cnt)); +			wrVal = ((temp << 16) | temp); + +			qla8044_wr_reg_indirect(vha, addr2, wrVal); +			qla8044_wr_reg_indirect(vha, addr1, value); + +			wait_count = 0; +			while (wait_count < poll) { +				qla8044_rd_reg_indirect(vha, addr1, &temp); +				if ((temp & mask) != 0) +					break; +				wait_count++; +			} +			if (wait_count == poll) { +				ql_log(ql_log_warn, vha, 0xb154, +				    "%s: TIMEOUT\n", __func__); +				goto error; +			} + +			qla8044_wr_reg_indirect(vha, addr1, +			    ((0x40000000 | value) + stride2)); +			wait_count = 0; +			while (wait_count < poll) { +				qla8044_rd_reg_indirect(vha, addr1, &temp); +				if ((temp & mask) != 0) +					break; +				wait_count++; +			} + +			if (wait_count == poll) { +				ql_log(ql_log_warn, vha, 0xb155, +				    "%s: TIMEOUT\n", __func__); +				goto error; +			} + +			qla8044_rd_reg_indirect(vha, addr2, &data); + +			*data_ptr++ = wrVal; +			*data_ptr++ = data; +		} + +	} + +	*d_ptr = data_ptr; +	return QLA_SUCCESS; + +error: +	return -1; + +} + +static uint32_t +qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha, +	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ +	int ret = 0; +	uint32_t addr1, addr2, value1, value2, data, selVal; +	uint8_t stride1, stride2; +	uint32_t addr3, addr4, addr5, addr6, addr7; +	uint16_t count, loop_cnt; +	uint32_t poll, mask; +	uint32_t *data_ptr = *d_ptr; + +	struct qla8044_minidump_entry_rdmdio *rdmdio; + +	rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr; + +	addr1 = rdmdio->addr_1; +	addr2 = rdmdio->addr_2; +	value1 = rdmdio->value_1; +	stride1 = rdmdio->stride_1; +	stride2 = rdmdio->stride_2; +	count = rdmdio->count; + +	poll = rdmdio->poll; +	mask = rdmdio->mask; +	value2 = rdmdio->value_2; + +	addr3 = addr1 + stride1; + +	for (loop_cnt = 0; loop_cnt < count; loop_cnt++) { +		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, +		    addr3, mask); +		if (ret == -1) +			goto error; + +		addr4 = addr2 - stride1; +		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4, +		    value2); +		if (ret == -1) +			goto error; + +		addr5 = addr2 - (2 * stride1); +		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5, +		    value1); +		if (ret == -1) +			goto error; + +		addr6 = addr2 - (3 * stride1); +		ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, +		    addr6, 0x2); +		if (ret == -1) +			goto error; + +		ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2, +		    addr3, mask); +		if (ret == -1) +			goto error; + +		addr7 = addr2 - (4 * stride1); +			data = qla8044_ipmdio_rd_reg(vha, addr1, addr3, +			    mask, addr7); +		if (data == -1) +			goto error; + +		selVal = (value2 << 18) | (value1 << 2) | 2; + +		stride2 = rdmdio->stride_2; +		*data_ptr++ = selVal; +		*data_ptr++ = data; + +		value1 = value1 + stride2; +		*d_ptr = data_ptr; +	} + +	return 0; + +error: +	return -1; +} + +static 
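The rddfe and rdmdio readers above repeat the same bounded poll inline: read a status register up to poll times, proceed when the mask bit rises, and bail out (so the template entry gets marked skipped) if it never does; the pollwr variant that follows tests r_value against poll rather than mask. A hypothetical helper condensing the shared pattern — poll_reg_set() is not a driver function, only an illustration:

	static int poll_reg_set(struct scsi_qla_host *vha, uint32_t addr,
		uint32_t mask, uint32_t max_polls)
	{
		uint32_t val, n;

		for (n = 0; n < max_polls; n++) {
			qla8044_rd_reg_indirect(vha, addr, &val);
			if (val & mask)
				return 0;	/* condition met */
		}
		return -1;		/* timed out; caller skips the entry */
	}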
uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha, +		struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr) +{ +	uint32_t addr1, addr2, value1, value2, poll, mask, r_value; +	uint32_t wait_count = 0; +	struct qla8044_minidump_entry_pollwr *pollwr_hdr; + +	pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; +	addr1 = pollwr_hdr->addr_1; +	addr2 = pollwr_hdr->addr_2; +	value1 = pollwr_hdr->value_1; +	value2 = pollwr_hdr->value_2; + +	poll = pollwr_hdr->poll; +	mask = pollwr_hdr->mask; + +	while (wait_count < poll) { +		qla8044_rd_reg_indirect(vha, addr1, &r_value); + +		if ((r_value & poll) != 0) +			break; +		wait_count++; +	} + +	if (wait_count == poll) { +		ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__); +		goto error; +	} + +	qla8044_wr_reg_indirect(vha, addr2, value2); +	qla8044_wr_reg_indirect(vha, addr1, value1); + +	wait_count = 0; +	while (wait_count < poll) { +		qla8044_rd_reg_indirect(vha, addr1, &r_value); + +		if ((r_value & poll) != 0) +			break; +		wait_count++; +	} + +	return QLA_SUCCESS; + +error: +	return -1; +} +  /*   *   * qla8044_collect_md_data - Retrieve firmware minidump data. @@ -3090,6 +3416,24 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)  			if (rval != QLA_SUCCESS)  				qla8044_mark_entry_skipped(vha, entry_hdr, i);  			break; +		case QLA8044_RDDFE: +			rval = qla8044_minidump_process_rddfe(vha, entry_hdr, +			    &data_ptr); +			if (rval != QLA_SUCCESS) +				qla8044_mark_entry_skipped(vha, entry_hdr, i); +			break; +		case QLA8044_RDMDIO: +			rval = qla8044_minidump_process_rdmdio(vha, entry_hdr, +			    &data_ptr); +			if (rval != QLA_SUCCESS) +				qla8044_mark_entry_skipped(vha, entry_hdr, i); +			break; +		case QLA8044_POLLWR: +			rval = qla8044_minidump_process_pollwr(vha, entry_hdr, +			    &data_ptr); +			if (rval != QLA_SUCCESS) +				qla8044_mark_entry_skipped(vha, entry_hdr, i); +			break;  		case QLA82XX_RDNOP:  		default:  			qla8044_mark_entry_skipped(vha, entry_hdr, i); @@ -3111,6 +3455,7 @@ skip_nxt_entry:  		    "Dump data mismatch: Data collected: "  		    "[0x%x], total_data_size:[0x%x]\n",  		    data_collected, ha->md_dump_size); +		rval = QLA_FUNCTION_FAILED;  		goto md_failed;  	} @@ -3135,10 +3480,12 @@ qla8044_get_minidump(struct scsi_qla_host *vha)  	if (!qla8044_collect_md_data(vha)) {  		ha->fw_dumped = 1; +		ha->prev_minidump_failed = 0;  	} else {  		ql_log(ql_log_fatal, vha, 0xb0db,  		    "%s: Unable to collect minidump\n",  		    __func__); +		ha->prev_minidump_failed = 1;  	}  } @@ -3714,3 +4061,19 @@ exit_isp_reset:  	return rval;  } +void +qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked) +{ +	struct qla_hw_data *ha = vha->hw; + +	if (!ha->allow_cna_fw_dump) +		return; + +	scsi_block_requests(vha->host); +	ha->flags.isp82xx_no_md_cap = 1; +	qla8044_idc_lock(ha); +	qla82xx_set_reset_owner(vha); +	qla8044_idc_unlock(ha); +	qla2x00_wait_for_chip_reset(vha); +	scsi_unblock_requests(vha->host); +} diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h index 2ab2eabab90..ada36057d7c 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.h +++ b/drivers/scsi/qla2xxx/qla_nx2.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   
*/ @@ -133,6 +133,7 @@  #define QLA8044_LINK_SPEED(f)		(0x36E0+(((f) >> 2) * 4))  #define QLA8044_MAX_LINK_SPEED(f)       (0x36F0+(((f) / 4) * 4))  #define QLA8044_LINK_SPEED_FACTOR	10 +#define QLA8044_FUN7_ACTIVE_INDEX	0x80  /* FLASH API Defines */  #define QLA8044_FLASH_MAX_WAIT_USEC	100 @@ -431,6 +432,50 @@ struct qla8044_minidump_entry_pollrd {  	uint32_t rsvd_1;  } __packed; +struct qla8044_minidump_entry_rddfe { +	struct qla8044_minidump_entry_hdr h; +	uint32_t addr_1; +	uint32_t value; +	uint8_t stride; +	uint8_t stride2; +	uint16_t count; +	uint32_t poll; +	uint32_t mask; +	uint32_t modify_mask; +	uint32_t data_size; +	uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { +	struct qla8044_minidump_entry_hdr h; + +	uint32_t addr_1; +	uint32_t addr_2; +	uint32_t value_1; +	uint8_t stride_1; +	uint8_t stride_2; +	uint16_t count; +	uint32_t poll; +	uint32_t mask; +	uint32_t value_2; +	uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { +	struct qla8044_minidump_entry_hdr h; +	uint32_t addr_1; +	uint32_t addr_2; +	uint32_t value_1; +	uint32_t value_2; +	uint32_t poll; +	uint32_t mask; +	uint32_t data_size; +	uint32_t rsvd; + +}  __packed; +  /* RDMUX2 Entry */  struct qla8044_minidump_entry_rdmux2 {  	struct qla8044_minidump_entry_hdr h; @@ -516,6 +561,9 @@ static const uint32_t qla8044_reg_tbl[] = {  #define QLA8044_DBG_RSVD_ARRAY_LEN              8  #define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN        16  #define QLA8044_SS_PCI_INDEX                    0 +#define QLA8044_RDDFE          38 +#define QLA8044_RDMDIO         39 +#define QLA8044_POLLWR         40  struct qla8044_minidump_template_hdr {  	uint32_t entry_type; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 9f01bbbf3a2..d96bfb55e57 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -110,7 +110,8 @@ MODULE_PARM_DESC(ql2xfdmienable,  		"Enables FDMI registrations. "  		"0 - no FDMI. Default is 1 - perform FDMI."); -int ql2xmaxqdepth = MAX_Q_DEPTH; +#define MAX_Q_DEPTH	32 +static int ql2xmaxqdepth = MAX_Q_DEPTH;  module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(ql2xmaxqdepth,  		"Maximum queue depth to set for each LUN. " @@ -119,15 +120,17 @@ MODULE_PARM_DESC(ql2xmaxqdepth,  int ql2xenabledif = 2;  module_param(ql2xenabledif, int, S_IRUGO);  MODULE_PARM_DESC(ql2xenabledif, -		" Enable T10-CRC-DIF " -		" Default is 0 - No DIF Support. 
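The three new template entry layouts above (rddfe, rdmdio, pollwr; entry types 38-40) are parsed straight off the flash-resident minidump template, so the __packed structs must match the template generator byte for byte. An illustrative compile-time guard, not present in the driver, for one of them:

	/* hdr + addr_1/value (8) + stride/stride2/count (4)
	 * + poll/mask/modify_mask/data_size/rsvd (20) = hdr + 32 */
	static inline void qla8044_minidump_layout_check(void)
	{
		BUILD_BUG_ON(sizeof(struct qla8044_minidump_entry_rddfe) !=
		    sizeof(struct qla8044_minidump_entry_hdr) + 32);
	}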
1 - Enable it" -		", 2 - Enable DIF for all types, except Type 0."); +		" Enable T10-CRC-DIF:\n" +		" Default is 2.\n" +		"  0 -- No DIF Support\n" +		"  1 -- Enable DIF for all types\n" +		"  2 -- Enable DIF for all types, except Type 0.\n");  int ql2xenablehba_err_chk = 2;  module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);  MODULE_PARM_DESC(ql2xenablehba_err_chk,  		" Enable T10-CRC-DIF Error isolation by HBA:\n" -		" Default is 1.\n" +		" Default is 2.\n"  		"  0 -- Error isolation disabled\n"  		"  1 -- Error isolation enabled only for DIX Type 0\n"  		"  2 -- Error isolation enabled for all Types\n"); @@ -494,18 +497,14 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)  	static char *pci_bus_modes[] = { "33", "66", "100", "133", };  	struct qla_hw_data *ha = vha->hw;  	uint32_t pci_bus; -	int pcie_reg; -	pcie_reg = pci_pcie_cap(ha->pdev); -	if (pcie_reg) { +	if (pci_is_pcie(ha->pdev)) {  		char lwstr[6]; -		uint16_t pcie_lstat, lspeed, lwidth; +		uint32_t lstat, lspeed, lwidth; -		pcie_reg += PCI_EXP_LNKCAP; -		pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat); -		lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3); -		lwidth = (pcie_lstat & -		    (BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4; +		pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); +		lspeed = lstat & PCI_EXP_LNKCAP_SLS; +		lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4;  		strcpy(str, "PCIe (");  		switch (lspeed) { @@ -617,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)  	if (sp->flags & SRB_CRC_CTX_DSD_VALID) {  		/* List assured to be having elements */ -		qla2x00_clean_dsd_pool(ha, sp); +		qla2x00_clean_dsd_pool(ha, sp, NULL);  		sp->flags &= ~SRB_CRC_CTX_DSD_VALID;  	} @@ -732,10 +731,8 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)  	}  	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); -	if (!sp) { -		set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags); +	if (!sp)  		goto qc24_host_busy; -	}  	sp->u.scmd.cmd = cmd;  	sp->type = SRB_SCSI_CMD; @@ -748,7 +745,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)  	if (rval != QLA_SUCCESS) {  		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,  		    "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); -		set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);  		goto qc24_host_busy_free_sp;  	} @@ -785,7 +781,7 @@ static int  qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)  {  #define ABORT_POLLING_PERIOD	1000 -#define ABORT_WAIT_ITER		((10 * 1000) / (ABORT_POLLING_PERIOD)) +#define ABORT_WAIT_ITER		((2 * 1000) / (ABORT_POLLING_PERIOD))  	unsigned long wait_iter = ABORT_WAIT_ITER;  	scsi_qla_host_t *vha = shost_priv(cmd->device->host);  	struct qla_hw_data *ha = vha->hw; @@ -848,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)  }  /* - * qla2x00_wait_for_reset_ready - *    Wait till the HBA is online after going through - *    <= MAX_RETRIES_OF_ISP_ABORT  or - *    finally HBA is disabled ie marked offline or flash - *    operations are in progress. + * qla2x00_wait_for_hba_ready + * Wait till the HBA is ready before doing driver unload   *   * Input:   *     ha - pointer to host adapter structure @@ -861,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)   *    Does context switching-Release SPIN_LOCK   *    (if any) before calling this routine.   
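The qla24xx_pci_info_str() rework above drops the hand-rolled capability-pointer walk in favor of the PCI core helpers: pci_is_pcie() plus pcie_capability_read_dword(), with the named PCI_EXP_LNKCAP_SLS/PCI_EXP_LNKCAP_MLW masks replacing the open-coded BIT_* arithmetic. The same pattern in isolation (a sketch, not the driver function):

	#include <linux/pci.h>

	static void show_pcie_link_caps(struct pci_dev *pdev)
	{
		u32 lnkcap;

		if (!pci_is_pcie(pdev))
			return;
		pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &lnkcap);
		pr_info("link: speed field %u, width x%u\n",
		    lnkcap & PCI_EXP_LNKCAP_SLS,
		    (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
	}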
* - * Return: - *    Success (Adapter is online/no flash ops) : 0 - *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1   */ -static int -qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha) +static void +qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)  { -	int		return_status; -	unsigned long	wait_online;  	struct qla_hw_data *ha = vha->hw; -	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); -	wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ); -	while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) || -	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || -	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || -	    ha->optrom_state != QLA_SWAITING || -	    ha->dpc_active) && time_before(jiffies, wait_online)) +	while ((!(vha->flags.online) || ha->dpc_active || +	    ha->flags.mbox_busy))  		msleep(1000); - -	if (base_vha->flags.online &&  ha->optrom_state == QLA_SWAITING) -		return_status = QLA_SUCCESS; -	else -		return_status = QLA_FUNCTION_FAILED; - -	ql_dbg(ql_dbg_taskm, vha, 0x8019, -	    "%s return status=%d.\n", __func__, return_status); - -	return return_status;  }  int @@ -949,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)  	int ret;  	unsigned int id, lun;  	unsigned long flags; -	int wait = 0; +	int rval, wait = 0;  	struct qla_hw_data *ha = vha->hw;  	if (!CMD_SP(cmd)) @@ -978,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)  	sp_get(sp);  	spin_unlock_irqrestore(&ha->hardware_lock, flags); -	if (ha->isp_ops->abort_command(sp)) { -		ret = FAILED; +	rval = ha->isp_ops->abort_command(sp); +	if (rval) { +		if (rval == QLA_FUNCTION_PARAMETER_ERROR) { +			/* +			 * Decrement the ref_count since we can't find the +			 * command +			 */ +			atomic_dec(&sp->ref_count); +			ret = SUCCESS; +		} else +			ret = FAILED; +  		ql_dbg(ql_dbg_taskm, vha, 0x8003, -		    "Abort command mbx failed cmd=%p.\n", cmd); +		    "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);  	} else {  		ql_dbg(ql_dbg_taskm, vha, 0x8004,  		    "Abort command mbx success cmd=%p.\n", cmd); @@ -989,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)  	}  	spin_lock_irqsave(&ha->hardware_lock, flags); +	/* +	 * Clear the slot in the oustanding_cmds array if we can't find the +	 * command to reclaim the resources. +	 */ +	if (rval == QLA_FUNCTION_PARAMETER_ERROR) +		vha->req->outstanding_cmds[sp->handle] = NULL;  	sp->done(ha, sp, 0);  	spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1240,7 +1229,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)  	ql_log(ql_log_info, vha, 0x8018,  	    "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun); -	if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS) +	/* +	 * No point in issuing another reset if one is active.  Also do not +	 * attempt a reset if we are updating flash. 
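Context for the qla2xxx_eh_abort() change above: the handler takes an extra reference (sp_get()) before issuing the abort mailbox command, so when firmware answers QLA_FUNCTION_PARAMETER_ERROR — meaning it no longer knows the exchange — that reference must be dropped and the command's slot in outstanding_cmds[] cleared by hand, or both leak and the abort is reported FAILED for a command that is already gone. Condensed flow of the fixed path (restating the hunks above, not new code):

	rval = ha->isp_ops->abort_command(sp);
	if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
		atomic_dec(&sp->ref_count);	/* balance the sp_get() */
		ret = SUCCESS;			/* exchange already completed */
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (rval == QLA_FUNCTION_PARAMETER_ERROR)
		vha->req->outstanding_cmds[sp->handle] = NULL; /* reclaim slot */
	sp->done(ha, sp, 0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);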
+	 */ +	if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)  		goto eh_host_reset_lock;  	if (vha != base_vha) { @@ -1478,81 +1471,6 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)  	return tag_type;  } -static void -qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha) -{ -	scsi_qla_host_t *vp; -	struct Scsi_Host *shost; -	struct scsi_device *sdev; -	struct qla_hw_data *ha = vha->hw; -	unsigned long flags; - -	ha->host_last_rampdown_time = jiffies; - -	if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun) -		return; - -	if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun) -		ha->cfg_lun_q_depth = vha->host->cmd_per_lun; -	else -		ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2; - -	/* -	 * Geometrically ramp down the queue depth for all devices on this -	 * adapter -	 */ -	spin_lock_irqsave(&ha->vport_slock, flags); -	list_for_each_entry(vp, &ha->vp_list, list) { -		shost = vp->host; -		shost_for_each_device(sdev, shost) { -			if (sdev->queue_depth > shost->cmd_per_lun) { -				if (sdev->queue_depth < ha->cfg_lun_q_depth) -					continue; -				ql_dbg(ql_dbg_io, vp, 0x3031, -				    "%ld:%d:%d: Ramping down queue depth to %d", -				    vp->host_no, sdev->id, sdev->lun, -				    ha->cfg_lun_q_depth); -				qla2x00_change_queue_depth(sdev, -				    ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT); -			} -		} -	} -	spin_unlock_irqrestore(&ha->vport_slock, flags); - -	return; -} - -static void -qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha) -{ -	scsi_qla_host_t *vp; -	struct Scsi_Host *shost; -	struct scsi_device *sdev; -	struct qla_hw_data *ha = vha->hw; -	unsigned long flags; - -	ha->host_last_rampup_time = jiffies; -	ha->cfg_lun_q_depth++; - -	/* -	 * Linearly ramp up the queue depth for all devices on this -	 * adapter -	 */ -	spin_lock_irqsave(&ha->vport_slock, flags); -	list_for_each_entry(vp, &ha->vp_list, list) { -		shost = vp->host; -		shost_for_each_device(sdev, shost) { -			if (sdev->queue_depth > ha->cfg_lun_q_depth) -				continue; -			qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth, -			    SCSI_QDEPTH_RAMP_UP); -		} -	} -	spin_unlock_irqrestore(&ha->vport_slock, flags); - -	return; -} -  /**   * qla2x00_config_dma_addressing() - Configure OS DMA addressing method.   
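Two simplifications above: qla2xxx_eh_host_reset() now bails out up front when a reset is already in flight or a flash update is running, instead of polling in qla2x00_wait_for_reset_ready(); and the driver-private queue-depth ramp machinery is deleted outright — the SCSI midlayer already provides generic ramp-up/ramp-down, so the per-adapter bookkeeping (cfg_lun_q_depth and the HOST_RAMP_* dpc flags) can go. qla2x00_reset_active() is an existing helper over the dpc_flags bits; its approximate shape, reproduced from memory as a sketch rather than verbatim:

	static inline int reset_active_sketch(scsi_qla_host_t *vha)
	{
		return test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		    test_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
	}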
* @ha: HA context @@ -2056,7 +1974,7 @@ static struct isp_operations qla82xx_isp_ops = {  	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,  	.read_nvram		= qla24xx_read_nvram_data,  	.write_nvram		= qla24xx_write_nvram_data, -	.fw_dump		= qla24xx_fw_dump, +	.fw_dump		= qla82xx_fw_dump,  	.beacon_on		= qla82xx_beacon_on,  	.beacon_off		= qla82xx_beacon_off,  	.beacon_blink		= NULL, @@ -2094,11 +2012,11 @@ static struct isp_operations qla8044_isp_ops = {  	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb,  	.read_nvram		= NULL,  	.write_nvram		= NULL, -	.fw_dump		= qla24xx_fw_dump, +	.fw_dump		= qla8044_fw_dump,  	.beacon_on		= qla82xx_beacon_on,  	.beacon_off		= qla82xx_beacon_off,  	.beacon_blink		= NULL, -	.read_optrom		= qla82xx_read_optrom_data, +	.read_optrom		= qla8044_read_optrom_data,  	.write_optrom		= qla8044_write_optrom_data,  	.get_flash_version	= qla82xx_get_flash_version,  	.start_scsi             = qla82xx_start_scsi, @@ -2159,7 +2077,7 @@ static struct isp_operations qlafx00_isp_ops = {  	.intr_handler		= qlafx00_intr_handler,  	.enable_intrs		= qlafx00_enable_intrs,  	.disable_intrs		= qlafx00_disable_intrs, -	.abort_command		= qlafx00_abort_command, +	.abort_command		= qla24xx_async_abort_command,  	.target_reset		= qlafx00_abort_target,  	.lun_reset		= qlafx00_lun_reset,  	.fabric_login		= NULL, @@ -2183,6 +2101,44 @@ static struct isp_operations qlafx00_isp_ops = {  	.initialize_adapter	= qlafx00_initialize_adapter,  }; +static struct isp_operations qla27xx_isp_ops = { +	.pci_config		= qla25xx_pci_config, +	.reset_chip		= qla24xx_reset_chip, +	.chip_diag		= qla24xx_chip_diag, +	.config_rings		= qla24xx_config_rings, +	.reset_adapter		= qla24xx_reset_adapter, +	.nvram_config		= qla81xx_nvram_config, +	.update_fw_options	= qla81xx_update_fw_options, +	.load_risc		= qla81xx_load_risc, +	.pci_info_str		= qla24xx_pci_info_str, +	.fw_version_str		= qla24xx_fw_version_str, +	.intr_handler		= qla24xx_intr_handler, +	.enable_intrs		= qla24xx_enable_intrs, +	.disable_intrs		= qla24xx_disable_intrs, +	.abort_command		= qla24xx_abort_command, +	.target_reset		= qla24xx_abort_target, +	.lun_reset		= qla24xx_lun_reset, +	.fabric_login		= qla24xx_login_fabric, +	.fabric_logout		= qla24xx_fabric_logout, +	.calc_req_entries	= NULL, +	.build_iocbs		= NULL, +	.prep_ms_iocb		= qla24xx_prep_ms_iocb, +	.prep_ms_fdmi_iocb	= qla24xx_prep_ms_fdmi_iocb, +	.read_nvram		= NULL, +	.write_nvram		= NULL, +	.fw_dump		= qla27xx_fwdump, +	.beacon_on		= qla24xx_beacon_on, +	.beacon_off		= qla24xx_beacon_off, +	.beacon_blink		= qla83xx_beacon_blink, +	.read_optrom		= qla25xx_read_optrom_data, +	.write_optrom		= qla24xx_write_optrom_data, +	.get_flash_version	= qla24xx_get_flash_version, +	.start_scsi		= qla24xx_dif_start_scsi, +	.abort_isp		= qla2x00_abort_isp, +	.iospace_config		= qla83xx_iospace_config, +	.initialize_adapter	= qla2x00_initialize_adapter, +}; +  static inline void  qla2x00_set_isp_flags(struct qla_hw_data *ha)  { @@ -2304,21 +2260,36 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)  	case PCI_DEVICE_ID_QLOGIC_ISPF001:  		ha->device_type |= DT_ISPFX00;  		break; +	case PCI_DEVICE_ID_QLOGIC_ISP2071: +		ha->device_type |= DT_ISP2071; +		ha->device_type |= DT_ZIO_SUPPORTED; +		ha->device_type |= DT_FWI2; +		ha->device_type |= DT_IIDMA; +		ha->fw_srisc_address = RISC_START_ADDRESS_2400; +		break; +	case PCI_DEVICE_ID_QLOGIC_ISP2271: +		ha->device_type |= DT_ISP2271; +		ha->device_type |= DT_ZIO_SUPPORTED; +		ha->device_type |= DT_FWI2; +		ha->device_type |= DT_IIDMA; +		ha->fw_srisc_address = 
RISC_START_ADDRESS_2400; +		break;  	}  	if (IS_QLA82XX(ha)) -		ha->port_no = !(ha->portnum & 1); -	else +		ha->port_no = ha->portnum & 1; +	else {  		/* Get adapter physical port no from interrupt pin register. */  		pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); +		if (IS_QLA27XX(ha)) +			ha->port_no--; +		else +			ha->port_no = !(ha->port_no & 1); +	} -	if (ha->port_no & 1) -		ha->flags.port0 = 1; -	else -		ha->flags.port0 = 0;  	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,  	    "device_type=0x%x port=%d fw_srisc_address=0x%x.\n", -	    ha->device_type, ha->flags.port0, ha->fw_srisc_address); +	    ha->device_type, ha->port_no, ha->fw_srisc_address);  }  static void @@ -2378,7 +2349,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||  	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||  	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || -	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) { +	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || +	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || +	    pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {  		bars = pci_select_bars(pdev, IORESOURCE_MEM);  		mem_only = 1;  		ql_dbg_pci(ql_dbg_init, pdev, 0x0007, @@ -2415,20 +2388,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	spin_lock_init(&ha->hardware_lock);  	spin_lock_init(&ha->vport_slock);  	mutex_init(&ha->selflogin_lock); +	mutex_init(&ha->optrom_mutex);  	/* Set ISP-type information. */  	qla2x00_set_isp_flags(ha);  	/* Set EEH reset type to fundamental if required by hba */  	if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || -	    IS_QLA83XX(ha)) +	    IS_QLA83XX(ha) || IS_QLA27XX(ha))  		pdev->needs_freset = 1;  	ha->prev_topology = 0;  	ha->init_cb_size = sizeof(init_cb_t);  	ha->link_data_rate = PORT_SPEED_UNKNOWN;  	ha->optrom_size = OPTROM_SIZE_2300; -	ha->cfg_lun_q_depth = ql2xmaxqdepth;  	/* Assign ISP specific operations. */  	if (IS_QLA2100(ha)) { @@ -2570,13 +2543,30 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  		ha->aen_mbx_count = AEN_MAILBOX_REGISTER_COUNT_FX00;  		req_length = REQUEST_ENTRY_CNT_FX00;  		rsp_length = RESPONSE_ENTRY_CNT_FX00; -		ha->init_cb_size = sizeof(struct init_cb_fx);  		ha->isp_ops = &qlafx00_isp_ops;  		ha->port_down_retry_count = 30; /* default value */  		ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;  		ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;  		ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;  		ha->mr.fw_hbt_en = 1; +		ha->mr.host_info_resend = false; +		ha->mr.hinfo_resend_timer_tick = QLAFX00_HINFO_RESEND_INTERVAL; +	} else if (IS_QLA27XX(ha)) { +		ha->portnum = PCI_FUNC(ha->pdev->devfn); +		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; +		ha->mbx_count = MAILBOX_REGISTER_COUNT; +		req_length = REQUEST_ENTRY_CNT_24XX; +		rsp_length = RESPONSE_ENTRY_CNT_2300; +		ha->max_loop_id = SNS_LAST_LOOP_ID_2300; +		ha->init_cb_size = sizeof(struct mid_init_cb_81xx); +		ha->gid_list_info_size = 8; +		ha->optrom_size = OPTROM_SIZE_83XX; +		ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; +		ha->isp_ops = &qla27xx_isp_ops; +		ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX; +		ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; +		ha->nvram_conf_off = ~0; +		ha->nvram_data_off = ~0;  	}  	ql_dbg_pci(ql_dbg_init, pdev, 0x001e, @@ -2616,7 +2606,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	    ha->flags.enable_64bit_addressing ? 
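On the port-number rework above: PCI_INTERRUPT_PIN is 1-based (INTA = 1), so the four-port ISP27xx maps pin N to port N-1, while the older dual-port parts keep deriving 0/1 by inverting the pin's low bit; with ha->port_no always populated, the separate flags.port0 bit becomes redundant and is dropped in favor of direct comparisons (see the qla_sup.c hunks further down). A stand-alone illustration of the two conventions, with made-up pin values:

	#include <stdio.h>

	int main(void)
	{
		unsigned char pin;

		for (pin = 1; pin <= 4; pin++)	/* INTA..INTD */
			printf("pin %u -> isp27xx port %u, legacy port %u\n",
			    pin, pin - 1, !(pin & 1));
		return 0;
	}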
"enable" :  	    "disable");  	ret = qla2x00_mem_alloc(ha, req_length, rsp_length, &req, &rsp); -	if (!ret) { +	if (ret) {  		ql_log_pci(ql_log_fatal, pdev, 0x0031,  		    "Failed to allocate memory for adapter, aborting.\n"); @@ -2641,10 +2631,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	host = base_vha->host;  	base_vha->req = req; -	if (IS_QLAFX00(ha)) -		host->can_queue = 1024; -	else -		host->can_queue = req->length + 128;  	if (IS_QLA2XXX_MIDTYPE(ha))  		base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;  	else @@ -2667,11 +2653,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  		if (!IS_QLA82XX(ha))  			host->sg_tablesize = QLA_SG_ALL;  	} -	ql_dbg(ql_dbg_init, base_vha, 0x0032, -	    "can_queue=%d, req=%p, " -	    "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", -	    host->can_queue, base_vha->req, -	    base_vha->mgmt_svr_loop_id, host->sg_tablesize);  	host->max_id = ha->max_fibre_devices;  	host->cmd_per_lun = 3;  	host->unique_id = host->host_no; @@ -2726,7 +2707,7 @@ que_init:  	req->req_q_out = &ha->iobase->isp24.req_q_out;  	rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;  	rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; -	if (ha->mqenable || IS_QLA83XX(ha)) { +	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  		req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;  		req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;  		rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; @@ -2787,6 +2768,16 @@ que_init:  		goto probe_failed;  	} +	if (IS_QLAFX00(ha)) +		host->can_queue = QLAFX00_MAX_CANQUEUE; +	else +		host->can_queue = req->num_outstanding_cmds - 10; + +	ql_dbg(ql_dbg_init, base_vha, 0x0032, +	    "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", +	    host->can_queue, base_vha->req, +	    base_vha->mgmt_svr_loop_id, host->sg_tablesize); +  	if (ha->mqenable) {  		if (qla25xx_setup_mode(base_vha)) {  			ql_log(ql_log_warn, base_vha, 0x00ec, @@ -2820,6 +2811,8 @@ que_init:  	 */  	qla2xxx_wake_dpc(base_vha); +	INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); +  	if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {  		sprintf(wq_name, "qla2xxx_%lu_dpc_lp_wq", base_vha->host_no);  		ha->dpc_lp_wq = create_singlethread_workqueue(wq_name); @@ -2889,6 +2882,7 @@ skip_dpc:  	base_vha->flags.init_done = 1;  	base_vha->flags.online = 1; +	ha->prev_minidump_failed = 0;  	ql_dbg(ql_dbg_init, base_vha, 0x00f2,  	    "Init done and hba is online.\n"); @@ -2959,15 +2953,15 @@ probe_hw_failed:  	}  	if (IS_QLA8044(ha)) {  		qla8044_idc_lock(ha); -		qla8044_clear_drv_active(base_vha); +		qla8044_clear_drv_active(ha);  		qla8044_idc_unlock(ha);  	}  iospace_config_failed:  	if (IS_P3P_TYPE(ha)) {  		if (!ha->nx_pcibase) -			iounmap((device_reg_t __iomem *)ha->nx_pcibase); +			iounmap((device_reg_t *)ha->nx_pcibase);  		if (!ql2xdbwr) -			iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); +			iounmap((device_reg_t *)ha->nxdb_wr_ptr);  	} else {  		if (ha->iobase)  			iounmap(ha->iobase); @@ -2984,22 +2978,6 @@ probe_out:  }  static void -qla2x00_stop_dpc_thread(scsi_qla_host_t *vha) -{ -	struct qla_hw_data *ha = vha->hw; -	struct task_struct *t = ha->dpc_thread; - -	if (ha->dpc_thread == NULL) -		return; -	/* -	 * qla2xxx_wake_dpc checks for ->dpc_thread -	 * so we need to zero it out. 
-	 */ -	ha->dpc_thread = NULL; -	kthread_stop(t); -} - -static void  qla2x00_shutdown(struct pci_dev *pdev)  {  	scsi_qla_host_t *vha; @@ -3042,29 +3020,14 @@ qla2x00_shutdown(struct pci_dev *pdev)  	qla2x00_free_fw_dump(ha);  } +/* Deletes all the virtual ports for a given ha */  static void -qla2x00_remove_one(struct pci_dev *pdev) +qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)  { -	scsi_qla_host_t *base_vha, *vha; -	struct qla_hw_data  *ha; +	struct Scsi_Host *scsi_host; +	scsi_qla_host_t *vha;  	unsigned long flags; -	/* -	 * If the PCI device is disabled that means that probe failed and any -	 * resources should be have cleaned up on probe exit. -	 */ -	if (!atomic_read(&pdev->enable_cnt)) -		return; - -	base_vha = pci_get_drvdata(pdev); -	ha = base_vha->hw; - -	ha->flags.host_shutting_down = 1; - -	set_bit(UNLOADING, &base_vha->dpc_flags); -	if (IS_QLAFX00(ha)) -		qlafx00_driver_shutdown(base_vha, 20); -  	mutex_lock(&ha->vport_lock);  	while (ha->cur_vport_count) {  		spin_lock_irqsave(&ha->vport_slock, flags); @@ -3072,7 +3035,7 @@ qla2x00_remove_one(struct pci_dev *pdev)  		BUG_ON(base_vha->list.next == &ha->vp_list);  		/* This assumes first entry in ha->vp_list is always base vha */  		vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); -		scsi_host_get(vha->host); +		scsi_host = scsi_host_get(vha->host);  		spin_unlock_irqrestore(&ha->vport_slock, flags);  		mutex_unlock(&ha->vport_lock); @@ -3083,27 +3046,12 @@ qla2x00_remove_one(struct pci_dev *pdev)  		mutex_lock(&ha->vport_lock);  	}  	mutex_unlock(&ha->vport_lock); +} -	if (IS_QLA8031(ha)) { -		ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, -		    "Clearing fcoe driver presence.\n"); -		if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) -			ql_dbg(ql_dbg_p3p, base_vha, 0xb079, -			    "Error while clearing DRV-Presence.\n"); -	} - -	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); - -	qla2x00_dfs_remove(base_vha); - -	qla84xx_put_chip(base_vha); - -	/* Disable timer */ -	if (base_vha->timer_active) -		qla2x00_stop_timer(base_vha); - -	base_vha->flags.online = 0; - +/* Stops all deferred work threads */ +static void +qla2x00_destroy_deferred_work(struct qla_hw_data *ha) +{  	/* Flush the work queue and remove it */  	if (ha->wq) {  		flush_workqueue(ha->wq); @@ -3137,45 +3085,110 @@ qla2x00_remove_one(struct pci_dev *pdev)  		ha->dpc_thread = NULL;  		kthread_stop(t);  	} -	qlt_remove_target(ha, base_vha); +} + +static void +qla2x00_unmap_iobases(struct qla_hw_data *ha) +{ +	if (IS_QLA82XX(ha)) { -	qla2x00_free_sysfs_attr(base_vha); +		iounmap((device_reg_t *)ha->nx_pcibase); +		if (!ql2xdbwr) +			iounmap((device_reg_t *)ha->nxdb_wr_ptr); +	} else { +		if (ha->iobase) +			iounmap(ha->iobase); -	fc_remove_host(base_vha->host); +		if (ha->cregbase) +			iounmap(ha->cregbase); -	scsi_remove_host(base_vha->host); +		if (ha->mqiobase) +			iounmap(ha->mqiobase); -	qla2x00_free_device(base_vha); +		if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase) +			iounmap(ha->msixbase); +	} +} -	scsi_host_put(base_vha->host); +static void +qla2x00_clear_drv_active(scsi_qla_host_t *vha) +{ +	struct qla_hw_data *ha = vha->hw;  	if (IS_QLA8044(ha)) {  		qla8044_idc_lock(ha); -		qla8044_clear_drv_active(base_vha); +		qla8044_clear_drv_active(ha);  		qla8044_idc_unlock(ha); -	} -	if (IS_QLA82XX(ha)) { +	} else if (IS_QLA82XX(ha)) {  		qla82xx_idc_lock(ha);  		qla82xx_clear_drv_active(ha);  		qla82xx_idc_unlock(ha); +	} +} -		iounmap((device_reg_t __iomem *)ha->nx_pcibase); -		if (!ql2xdbwr) -			
iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr); -	} else { -		if (ha->iobase) -			iounmap(ha->iobase); +static void +qla2x00_remove_one(struct pci_dev *pdev) +{ +	scsi_qla_host_t *base_vha; +	struct qla_hw_data  *ha; -		if (ha->cregbase) -			iounmap(ha->cregbase); +	/* +	 * If the PCI device is disabled that means that probe failed and any +	 * resources should be have cleaned up on probe exit. +	 */ +	if (!atomic_read(&pdev->enable_cnt)) +		return; -		if (ha->mqiobase) -			iounmap(ha->mqiobase); +	base_vha = pci_get_drvdata(pdev); +	ha = base_vha->hw; -		if (IS_QLA83XX(ha) && ha->msixbase) -			iounmap(ha->msixbase); +	qla2x00_wait_for_hba_ready(base_vha); + +	set_bit(UNLOADING, &base_vha->dpc_flags); + +	if (IS_QLAFX00(ha)) +		qlafx00_driver_shutdown(base_vha, 20); + +	qla2x00_delete_all_vps(ha, base_vha); + +	if (IS_QLA8031(ha)) { +		ql_dbg(ql_dbg_p3p, base_vha, 0xb07e, +		    "Clearing fcoe driver presence.\n"); +		if (qla83xx_clear_drv_presence(base_vha) != QLA_SUCCESS) +			ql_dbg(ql_dbg_p3p, base_vha, 0xb079, +			    "Error while clearing DRV-Presence.\n");  	} +	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); + +	qla2x00_dfs_remove(base_vha); + +	qla84xx_put_chip(base_vha); + +	/* Disable timer */ +	if (base_vha->timer_active) +		qla2x00_stop_timer(base_vha); + +	base_vha->flags.online = 0; + +	qla2x00_destroy_deferred_work(ha); + +	qlt_remove_target(ha, base_vha); + +	qla2x00_free_sysfs_attr(base_vha, true); + +	fc_remove_host(base_vha->host); + +	scsi_remove_host(base_vha->host); + +	qla2x00_free_device(base_vha); + +	scsi_host_put(base_vha->host); + +	qla2x00_clear_drv_active(base_vha); + +	qla2x00_unmap_iobases(ha); +  	pci_release_selected_regions(ha->pdev, ha->bars);  	kfree(ha);  	ha = NULL; @@ -3183,7 +3196,6 @@ qla2x00_remove_one(struct pci_dev *pdev)  	pci_disable_pcie_error_reporting(pdev);  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  }  static void @@ -3197,9 +3209,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)  	if (vha->timer_active)  		qla2x00_stop_timer(vha); -	qla2x00_stop_dpc_thread(vha); -  	qla25xx_delete_queues(vha); +  	if (ha->flags.fce_enabled)  		qla2x00_disable_fce_trace(vha, NULL, NULL); @@ -3510,7 +3521,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,  		ha->npiv_info = NULL;  	/* Get consistent memory allocated for EX-INIT-CB. */ -	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) { +	if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {  		ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,  		    &ha->ex_init_cb_dma);  		if (!ha->ex_init_cb) @@ -3541,10 +3552,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,  	else {  		qla2x00_set_reserved_loop_ids(ha);  		ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, -		    "loop_id_map=%p. 
\n", ha->loop_id_map); +		    "loop_id_map=%p.\n", ha->loop_id_map);  	} -	return 1; +	return 0;  fail_async_pd:  	dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); @@ -3625,22 +3636,29 @@ static void  qla2x00_free_fw_dump(struct qla_hw_data *ha)  {  	if (ha->fce) -		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, -		    ha->fce_dma); +		dma_free_coherent(&ha->pdev->dev, +		    FCE_SIZE, ha->fce, ha->fce_dma); -	if (ha->fw_dump) { -		if (ha->eft) -			dma_free_coherent(&ha->pdev->dev, -			    ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); +	if (ha->eft) +		dma_free_coherent(&ha->pdev->dev, +		    EFT_SIZE, ha->eft, ha->eft_dma); + +	if (ha->fw_dump)  		vfree(ha->fw_dump); -	} +	if (ha->fw_dump_template) +		vfree(ha->fw_dump_template); +  	ha->fce = NULL;  	ha->fce_dma = 0;  	ha->eft = NULL;  	ha->eft_dma = 0; -	ha->fw_dump = NULL;  	ha->fw_dumped = 0; +	ha->fw_dump_cap_flags = 0;  	ha->fw_dump_reading = 0; +	ha->fw_dump = NULL; +	ha->fw_dump_len = 0; +	ha->fw_dump_template = NULL; +	ha->fw_dump_template_len = 0;  }  /* @@ -4736,6 +4754,66 @@ exit:  	return rval;  } +void +qla2x00_disable_board_on_pci_error(struct work_struct *work) +{ +	struct qla_hw_data *ha = container_of(work, struct qla_hw_data, +	    board_disable); +	struct pci_dev *pdev = ha->pdev; +	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + +	ql_log(ql_log_warn, base_vha, 0x015b, +	    "Disabling adapter.\n"); + +	set_bit(UNLOADING, &base_vha->dpc_flags); + +	qla2x00_delete_all_vps(ha, base_vha); + +	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); + +	qla2x00_dfs_remove(base_vha); + +	qla84xx_put_chip(base_vha); + +	if (base_vha->timer_active) +		qla2x00_stop_timer(base_vha); + +	base_vha->flags.online = 0; + +	qla2x00_destroy_deferred_work(ha); + +	/* +	 * Do not try to stop beacon blink as it will issue a mailbox +	 * command. +	 */ +	qla2x00_free_sysfs_attr(base_vha, false); + +	fc_remove_host(base_vha->host); + +	scsi_remove_host(base_vha->host); + +	base_vha->flags.init_done = 0; +	qla25xx_delete_queues(base_vha); +	qla2x00_free_irqs(base_vha); +	qla2x00_free_fcports(base_vha); +	qla2x00_mem_free(ha); +	qla82xx_md_free(base_vha); +	qla2x00_free_queues(ha); + +	scsi_host_put(base_vha->host); + +	qla2x00_unmap_iobases(ha); + +	pci_release_selected_regions(ha->pdev, ha->bars); +	kfree(ha); +	ha = NULL; + +	pci_disable_pcie_error_reporting(pdev); +	pci_disable_device(pdev); +	pci_set_drvdata(pdev, NULL); + +} +  /**************************************************************************  * qla2x00_do_dpc  *   This kernel thread is a task that is schedule by the interrupt handler @@ -4759,7 +4837,7 @@ qla2x00_do_dpc(void *data)  	ha = (struct qla_hw_data *)data;  	base_vha = pci_get_drvdata(ha->pdev); -	set_user_nice(current, -20); +	set_user_nice(current, MIN_NICE);  	set_current_state(TASK_INTERRUPTIBLE);  	while (!kthread_should_stop()) { @@ -4844,12 +4922,13 @@ qla2x00_do_dpc(void *data)  				if (qlafx00_reset_initialize(base_vha)) {  					/* Failed. Abort isp later. 
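Two notes on the hunks above. First, qla2x00_mem_alloc() flips its convention to return 0 on success (QLA_SUCCESS-style) rather than 1; the matching caller change from if (!ret) to if (ret) sits in the probe path further up, and the two must land together. Second, qla2x00_disable_board_on_pci_error() is deliberately assembled from the helpers split out of qla2x00_remove_one() — qla2x00_delete_all_vps(), qla2x00_destroy_deferred_work(), qla2x00_unmap_iobases() — so surprise removal and orderly unload share one teardown order instead of drifting apart. Condensed, the shared sequence is roughly (sketch with a hypothetical wrapper name, not a driver function):

	static void qla2x00_teardown_common(struct qla_hw_data *ha,
		scsi_qla_host_t *base_vha)
	{
		set_bit(UNLOADING, &base_vha->dpc_flags);
		qla2x00_delete_all_vps(ha, base_vha);
		qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
		qla2x00_destroy_deferred_work(ha);	/* workqueues + DPC thread */
		fc_remove_host(base_vha->host);
		scsi_remove_host(base_vha->host);
		qla2x00_unmap_iobases(ha);
	}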
*/  					if (!test_bit(UNLOADING, -					    &base_vha->dpc_flags)) +					    &base_vha->dpc_flags)) {  						set_bit(ISP_UNRECOVERABLE,  						    &base_vha->dpc_flags);  						ql_dbg(ql_dbg_dpc, base_vha,  						    0x4021,  						    "Reset Recovery Failed\n"); +					}  				}  			} @@ -4868,6 +4947,14 @@ qla2x00_do_dpc(void *data)  				ql_dbg(ql_dbg_dpc, base_vha, 0x401f,  				    "ISPFx00 Target Scan End\n");  			} +			if (test_and_clear_bit(FX00_HOST_INFO_RESEND, +				&base_vha->dpc_flags)) { +				ql_dbg(ql_dbg_dpc, base_vha, 0x4023, +				    "ISPFx00 Host Info resend scheduled\n"); +				qlafx00_fx_disc(base_vha, +				    &base_vha->hw->mr.fcport, +				    FXDISC_REG_HOST_INFO); +			}  		}  		if (test_and_clear_bit(ISP_ABORT_NEEDED, @@ -4995,24 +5082,15 @@ loop_resync_check:  			qla2xxx_flash_npiv_conf(base_vha);  		} -		if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, -		    &base_vha->dpc_flags)) { -			/* Prevents simultaneous ramp up and down */ -			clear_bit(HOST_RAMP_UP_QUEUE_DEPTH, -			    &base_vha->dpc_flags); -			qla2x00_host_ramp_down_queuedepth(base_vha); -		} - -		if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH, -		    &base_vha->dpc_flags)) -			qla2x00_host_ramp_up_queuedepth(base_vha);  intr_on_check:  		if (!ha->interrupts_on)  			ha->isp_ops->enable_intrs(ha);  		if (test_and_clear_bit(BEACON_BLINK_NEEDED, -					&base_vha->dpc_flags)) -			ha->isp_ops->beacon_blink(base_vha); +					&base_vha->dpc_flags)) { +			if (ha->beacon_blink_led == 1) +				ha->isp_ops->beacon_blink(base_vha); +		}  		if (!IS_QLAFX00(ha))  			qla2x00_do_dpc_all_vps(base_vha); @@ -5100,9 +5178,20 @@ qla2x00_timer(scsi_qla_host_t *vha)  		return;  	} -	/* Hardware read to raise pending EEH errors during mailbox waits. */ -	if (!pci_channel_offline(ha->pdev)) +	/* +	 * Hardware read to raise pending EEH errors during mailbox waits. If +	 * the read returns -1 then disable the board. +	 */ +	if (!pci_channel_offline(ha->pdev)) {  		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); +		if (w == 0xffff) +			/* +			 * Schedule this on the default system workqueue so that +			 * all the adapter workqueues and the DPC thread can be +			 * shutdown cleanly. 
+			 */ +			schedule_work(&ha->board_disable); +	}  	/* Make sure qla82xx_watchdog is run only for physical port */  	if (!vha->vp_idx && IS_P3P_TYPE(ha)) { @@ -5187,7 +5276,6 @@ qla2x00_timer(scsi_qla_host_t *vha)  		    "Loop down - seconds remaining %d.\n",  		    atomic_read(&vha->loop_down_timer));  	} -  	/* Check if beacon LED needs to be blinked for physical host only */  	if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {  		/* There is no beacon_blink function for ISP82xx */ @@ -5211,9 +5299,7 @@ qla2x00_timer(scsi_qla_host_t *vha)  	    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||  	    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||  	    test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || -	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || -	    test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) || -	    test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) { +	    test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {  		ql_dbg(ql_dbg_timer, vha, 0x600b,  		    "isp_abort_needed=%d loop_resync_needed=%d "  		    "fcport_update_needed=%d start_dpc=%d " @@ -5226,15 +5312,12 @@ qla2x00_timer(scsi_qla_host_t *vha)  		ql_dbg(ql_dbg_timer, vha, 0x600c,  		    "beacon_blink_needed=%d isp_unrecoverable=%d "  		    "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " -		    "relogin_needed=%d, host_ramp_down_needed=%d " -		    "host_ramp_up_needed=%d.\n", +		    "relogin_needed=%d.\n",  		    test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),  		    test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),  		    test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),  		    test_bit(VP_DPC_NEEDED, &vha->dpc_flags), -		    test_bit(RELOGIN_NEEDED, &vha->dpc_flags), -		    test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags), -		    test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags)); +		    test_bit(RELOGIN_NEEDED, &vha->dpc_flags));  		qla2xxx_wake_dpc(vha);  	} @@ -5243,7 +5326,7 @@ qla2x00_timer(scsi_qla_host_t *vha)  /* Firmware interface routines. 
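The timer hook above turns a stale config-space read into an orderly shutdown: a PCI_VENDOR_ID read returning 0xffff is the canonical sign that the adapter has dropped off the bus, and the disable work is queued on the system workqueue precisely so the handler can tear down the driver's own workqueues and DPC thread from inside it. The detection idiom in isolation (sketch):

	static bool adapter_vanished(struct pci_dev *pdev)
	{
		u16 vendor;

		if (pci_channel_offline(pdev))
			return false;	/* EEH is already dealing with it */
		pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
		return vendor == 0xffff;	/* all-ones: device is gone */
	}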
*/ -#define FW_BLOBS	10 +#define FW_BLOBS	11  #define FW_ISP21XX	0  #define FW_ISP22XX	1  #define FW_ISP2300	2 @@ -5254,6 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha)  #define FW_ISP82XX	7  #define FW_ISP2031	8  #define FW_ISP8031	9 +#define FW_ISP27XX	10  #define FW_FILE_ISP21XX	"ql2100_fw.bin"  #define FW_FILE_ISP22XX	"ql2200_fw.bin" @@ -5265,6 +5349,8 @@ qla2x00_timer(scsi_qla_host_t *vha)  #define FW_FILE_ISP82XX	"ql8200_fw.bin"  #define FW_FILE_ISP2031	"ql2600_fw.bin"  #define FW_FILE_ISP8031	"ql8300_fw.bin" +#define FW_FILE_ISP27XX	"ql2700_fw.bin" +  static DEFINE_MUTEX(qla_fw_lock); @@ -5279,6 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {  	{ .name = FW_FILE_ISP82XX, },  	{ .name = FW_FILE_ISP2031, },  	{ .name = FW_FILE_ISP8031, }, +	{ .name = FW_FILE_ISP27XX, },  };  struct fw_blob * @@ -5307,6 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)  		blob = &qla_fw_blobs[FW_ISP2031];  	} else if (IS_QLA8031(ha)) {  		blob = &qla_fw_blobs[FW_ISP8031]; +	} else if (IS_QLA27XX(ha)) { +		blob = &qla_fw_blobs[FW_ISP27XX];  	} else {  		return NULL;  	} @@ -5636,6 +5725,8 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {  	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },  	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },  	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, +	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, +	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },  	{ 0 },  };  MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index 46ef0ac48f4..2fb7ebfbbc3 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index bd56cde795f..bca173e56f1 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1,6 +1,6 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */ @@ -568,7 +568,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)  	else if (IS_P3P_TYPE(ha)) {  		*start = FA_FLASH_LAYOUT_ADDR_82;  		goto end; -	} else if (IS_QLA83XX(ha)) { +	} else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {  		*start = FA_FLASH_LAYOUT_ADDR_83;  		goto end;  	} @@ -682,7 +682,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)  	/* Assign FCP prio region since older adapters may not have FLT, or  	   FCP prio region in it's FLT.  	 */ -	ha->flt_region_fcp_prio = ha->flags.port0 ? +	ha->flt_region_fcp_prio = (ha->port_no == 0) ?  	    
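Adding FW_FILE_ISP27XX above requires the FW_BLOBS bump in the same hunk: qla_fw_blobs[] is sized by FW_BLOBS, so the extra initializer would otherwise overflow the array at compile time. The blob is fetched on demand through request_firmware(), so ql2700_fw.bin has to be shipped in the firmware search path; if it is not already declared elsewhere in the file, initramfs tooling would normally be told about it with (illustrative):

	MODULE_FIRMWARE(FW_FILE_ISP27XX);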
fcp_prio_cfg0[def] : fcp_prio_cfg1[def];  	ha->flt_region_flt = flt_addr; @@ -743,47 +743,71 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)  			ha->flt_region_vpd_nvram = start;  			if (IS_P3P_TYPE(ha))  				break; -			if (ha->flags.port0) +			if (ha->port_no == 0)  				ha->flt_region_vpd = start;  			break;  		case FLT_REG_VPD_1:  			if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))  				break; -			if (!ha->flags.port0) +			if (ha->port_no == 1) +				ha->flt_region_vpd = start; +			break; +		case FLT_REG_VPD_2: +			if (!IS_QLA27XX(ha)) +				break; +			if (ha->port_no == 2) +				ha->flt_region_vpd = start; +			break; +		case FLT_REG_VPD_3: +			if (!IS_QLA27XX(ha)) +				break; +			if (ha->port_no == 3)  				ha->flt_region_vpd = start;  			break;  		case FLT_REG_NVRAM_0:  			if (IS_QLA8031(ha))  				break; -			if (ha->flags.port0) +			if (ha->port_no == 0)  				ha->flt_region_nvram = start;  			break;  		case FLT_REG_NVRAM_1:  			if (IS_QLA8031(ha))  				break; -			if (!ha->flags.port0) +			if (ha->port_no == 1) +				ha->flt_region_nvram = start; +			break; +		case FLT_REG_NVRAM_2: +			if (!IS_QLA27XX(ha)) +				break; +			if (ha->port_no == 2) +				ha->flt_region_nvram = start; +			break; +		case FLT_REG_NVRAM_3: +			if (!IS_QLA27XX(ha)) +				break; +			if (ha->port_no == 3)  				ha->flt_region_nvram = start;  			break;  		case FLT_REG_FDT:  			ha->flt_region_fdt = start;  			break;  		case FLT_REG_NPIV_CONF_0: -			if (ha->flags.port0) +			if (ha->port_no == 0)  				ha->flt_region_npiv_conf = start;  			break;  		case FLT_REG_NPIV_CONF_1: -			if (!ha->flags.port0) +			if (ha->port_no == 1)  				ha->flt_region_npiv_conf = start;  			break;  		case FLT_REG_GOLD_FW:  			ha->flt_region_gold_fw = start;  			break;  		case FLT_REG_FCP_PRIO_0: -			if (ha->flags.port0) +			if (ha->port_no == 0)  				ha->flt_region_fcp_prio = start;  			break;  		case FLT_REG_FCP_PRIO_1: -			if (!ha->flags.port0) +			if (ha->port_no == 1)  				ha->flt_region_fcp_prio = start;  			break;  		case FLT_REG_BOOT_CODE_82XX: @@ -813,13 +837,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)  		case FLT_REG_FCOE_NVRAM_0:  			if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))  				break; -			if (ha->flags.port0) +			if (ha->port_no == 0)  				ha->flt_region_nvram = start;  			break;  		case FLT_REG_FCOE_NVRAM_1:  			if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))  				break; -			if (!ha->flags.port0) +			if (ha->port_no == 1)  				ha->flt_region_nvram = start;  			break;  		} @@ -832,12 +856,12 @@ no_flash_data:  	ha->flt_region_fw = def_fw[def];  	ha->flt_region_boot = def_boot[def];  	ha->flt_region_vpd_nvram = def_vpd_nvram[def]; -	ha->flt_region_vpd = ha->flags.port0 ? +	ha->flt_region_vpd = (ha->port_no == 0) ?  	    def_vpd0[def] : def_vpd1[def]; -	ha->flt_region_nvram = ha->flags.port0 ? +	ha->flt_region_nvram = (ha->port_no == 0) ?  	    def_nvram0[def] : def_nvram1[def];  	ha->flt_region_fdt = def_fdt[def]; -	ha->flt_region_npiv_conf = ha->flags.port0 ? +	ha->flt_region_npiv_conf = (ha->port_no == 0) ?  	    
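The FLT parsing above follows one rule throughout: region k belongs to the function with ha->port_no == k, with the _2/_3 variants only defined for the four-port ISP27xx. A table-driven equivalent of the per-port VPD cases (hypothetical refactor, not the driver's code; code stands for the decoded region id):

	static const uint16_t flt_vpd_reg[] = {
		FLT_REG_VPD_0, FLT_REG_VPD_1, FLT_REG_VPD_2, FLT_REG_VPD_3,
	};

	/* per region entry: */
	if (ha->port_no < (IS_QLA27XX(ha) ? 4 : 2) &&
	    code == flt_vpd_reg[ha->port_no])
		ha->flt_region_vpd = start;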
def_npiv_conf0[def] : def_npiv_conf1[def];  done:  	ql_dbg(ql_dbg_init, vha, 0x004a, @@ -989,7 +1013,7 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)  	struct qla_hw_data *ha = vha->hw;  	if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && -	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) +	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))  		return QLA_SUCCESS;  	ret = qla2xxx_find_flt_start(vha, &flt_addr); @@ -1192,7 +1216,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,  	struct qla_hw_data *ha = vha->hw;  	/* Prepare burst-capable write on supported ISPs. */ -	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) && +	if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || +	    IS_QLA27XX(ha)) &&  	    !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {  		optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,  		    &optrom_dma, GFP_KERNEL); @@ -1675,7 +1700,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)  	if (!IS_QLA83XX(ha))  		goto out; -	if (ha->flags.port0) +	if (ha->port_no == 0)  		led_select_value = QLA83XX_LED_PORT0;  	else  		led_select_value = QLA83XX_LED_PORT1; @@ -1702,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)  	if (IS_QLA2031(ha)) {  		led_select_value = qla83xx_select_led_port(ha); -		qla83xx_wr_reg(vha, led_select_value, 0x40002000); -		qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000); -		msleep(1000); -		qla83xx_wr_reg(vha, led_select_value, 0x40004000); -		qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000); +		qla83xx_wr_reg(vha, led_select_value, 0x40000230); +		qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);  	} else if (IS_QLA8031(ha)) {  		led_select_value = qla83xx_select_led_port(ha); @@ -2332,7 +2354,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,  				 */  				rest_addr = 0xffff;  				sec_mask = 0x10000; -				break;    +				break;  			}  			/*  			 * ST m29w010b part - 16kb sector size @@ -2558,7 +2580,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,  	uint32_t faddr, left, burst;  	struct qla_hw_data *ha = vha->hw; -	if (IS_QLA25XX(ha) || IS_QLA81XX(ha)) +	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))  		goto try_fast;  	if (offset & 0xfff)  		goto slow_read; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 596480022b0..e632e14180c 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -104,7 +104,6 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,  /*   * Global Variables   */ -static struct kmem_cache *qla_tgt_cmd_cachep;  static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;  static mempool_t *qla_tgt_mgmt_cmd_mempool;  static struct workqueue_struct *qla_tgt_wq; @@ -182,6 +181,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,  void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,  	struct atio_from_isp *atio)  { +	ql_dbg(ql_dbg_tgt, vha, 0xe072, +		"%s: qla_target(%d): type %x ox_id %04x\n", +		__func__, vha->vp_idx, atio->u.raw.entry_type, +		be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); +  	switch (atio->u.raw.entry_type) {  	case ATIO_TYPE7:  	{ @@ -236,6 +240,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,  void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)  {  	switch (pkt->entry_type) { +	case CTIO_CRC2: +		ql_dbg(ql_dbg_tgt, vha, 0xe073, +			"qla_target(%d):%s: CRC2 Response pkt\n", +			vha->vp_idx, __func__);  	case CTIO_TYPE7:  	{  		struct 
ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; @@ -471,7 +479,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,  		schedule_delayed_work(&tgt->sess_del_work, 0);  	else  		schedule_delayed_work(&tgt->sess_del_work, -		    jiffies - sess->expires); +		    sess->expires - jiffies);  }  /* ha->hardware_lock supposed to be held on entry */ @@ -550,13 +558,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)  	struct scsi_qla_host *vha = tgt->vha;  	struct qla_hw_data *ha = vha->hw;  	struct qla_tgt_sess *sess; -	unsigned long flags; +	unsigned long flags, elapsed;  	spin_lock_irqsave(&ha->hardware_lock, flags);  	while (!list_empty(&tgt->del_sess_list)) {  		sess = list_entry(tgt->del_sess_list.next, typeof(*sess),  		    del_list_entry); -		if (time_after_eq(jiffies, sess->expires)) { +		elapsed = jiffies; +		if (time_after_eq(elapsed, sess->expires)) {  			qlt_undelete_sess(sess);  			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, @@ -566,7 +575,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)  			ha->tgt.tgt_ops->put_sess(sess);  		} else {  			schedule_delayed_work(&tgt->sess_del_work, -			    jiffies - sess->expires); +			    sess->expires - elapsed);  			break;  		}  	} @@ -589,7 +598,7 @@ static struct qla_tgt_sess *qlt_create_sess(  	/* Check to avoid double sessions */  	spin_lock_irqsave(&ha->hardware_lock, flags); -	list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list, +	list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,  				sess_list_entry) {  		if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {  			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005, @@ -626,7 +635,7 @@ static struct qla_tgt_sess *qlt_create_sess(  		return NULL;  	} -	sess->tgt = ha->tgt.qla_tgt; +	sess->tgt = vha->vha_tgt.qla_tgt;  	sess->vha = vha;  	sess->s_id = fcport->d_id;  	sess->loop_id = fcport->loop_id; @@ -634,7 +643,7 @@ static struct qla_tgt_sess *qlt_create_sess(  	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,  	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", -	    sess, ha->tgt.qla_tgt); +	    sess, vha->vha_tgt.qla_tgt);  	be_sid[0] = sess->s_id.b.domain;  	be_sid[1] = sess->s_id.b.area; @@ -661,8 +670,8 @@ static struct qla_tgt_sess *qlt_create_sess(  	memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));  	spin_lock_irqsave(&ha->hardware_lock, flags); -	list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list); -	ha->tgt.qla_tgt->sess_count++; +	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); +	vha->vha_tgt.qla_tgt->sess_count++;  	spin_unlock_irqrestore(&ha->hardware_lock, flags);  	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, @@ -681,7 +690,7 @@ static struct qla_tgt_sess *qlt_create_sess(  void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	struct qla_tgt_sess *sess;  	unsigned long flags; @@ -691,6 +700,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)  	if (!tgt || (fcport->port_type != FCT_INITIATOR))  		return; +	if (qla_ini_mode_enabled(vha)) +		return; +  	spin_lock_irqsave(&ha->hardware_lock, flags);  	if (tgt->tgt_stop) {  		spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -700,9 +712,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)  	if (!sess) {  		spin_unlock_irqrestore(&ha->hardware_lock, flags); -		mutex_lock(&ha->tgt.tgt_mutex); +		mutex_lock(&vha->vha_tgt.tgt_mutex);  		sess = 
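The two delayed-work changes above fix inverted jiffies arithmetic: scheduling with jiffies - sess->expires hands schedule_delayed_work() an underflowed, near-ULONG_MAX tick count whenever the expiry is still in the future, so deleted sessions were effectively never reaped on time; the remaining delay is expires - now. A stand-alone demonstration of the underflow:

	#include <stdio.h>

	int main(void)
	{
		unsigned long now = 1000, expires = 1250;	/* 250 ticks left */

		printf("old (wrong) delay: %lu\n", now - expires);	/* huge */
		printf("new (right) delay: %lu\n", expires - now);	/* 250  */
		return 0;
	}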
qlt_create_sess(vha, fcport, false); -		mutex_unlock(&ha->tgt.tgt_mutex); +		mutex_unlock(&vha->vha_tgt.tgt_mutex);  		spin_lock_irqsave(&ha->hardware_lock, flags);  	} else { @@ -738,7 +750,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)  void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	struct qla_tgt_sess *sess;  	unsigned long flags; @@ -786,17 +798,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)  }  /* Called by tcm_qla2xxx configfs code */ -void qlt_stop_phase1(struct qla_tgt *tgt) +int qlt_stop_phase1(struct qla_tgt *tgt)  {  	struct scsi_qla_host *vha = tgt->vha;  	struct qla_hw_data *ha = tgt->ha;  	unsigned long flags; +	mutex_lock(&qla_tgt_mutex); +	if (!vha->fc_vport) { +		struct Scsi_Host *sh = vha->host; +		struct fc_host_attrs *fc_host = shost_to_fc_host(sh); +		bool npiv_vports; + +		spin_lock_irqsave(sh->host_lock, flags); +		npiv_vports = (fc_host->npiv_vports_inuse); +		spin_unlock_irqrestore(sh->host_lock, flags); + +		if (npiv_vports) { +			mutex_unlock(&qla_tgt_mutex); +			return -EPERM; +		} +	}  	if (tgt->tgt_stop || tgt->tgt_stopped) {  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,  		    "Already in tgt->tgt_stop or tgt_stopped state\n"); -		dump_stack(); -		return; +		mutex_unlock(&qla_tgt_mutex); +		return -EPERM;  	}  	ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", @@ -805,12 +832,13 @@ void qlt_stop_phase1(struct qla_tgt *tgt)  	 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].  	 * Lock is needed, because we still can get an incoming packet.  	 */ -	mutex_lock(&ha->tgt.tgt_mutex); +	mutex_lock(&vha->vha_tgt.tgt_mutex);  	spin_lock_irqsave(&ha->hardware_lock, flags);  	tgt->tgt_stop = 1;  	qlt_clear_tgt_db(tgt, true);  	spin_unlock_irqrestore(&ha->hardware_lock, flags); -	mutex_unlock(&ha->tgt.tgt_mutex); +	mutex_unlock(&vha->vha_tgt.tgt_mutex); +	mutex_unlock(&qla_tgt_mutex);  	flush_delayed_work(&tgt->sess_del_work); @@ -837,6 +865,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)  	/* Wait for sessions to clear out (just in case) */  	wait_event(tgt->waitQ, test_tgt_sess_count(tgt)); +	return 0;  }  EXPORT_SYMBOL(qlt_stop_phase1); @@ -844,20 +873,21 @@ EXPORT_SYMBOL(qlt_stop_phase1);  void qlt_stop_phase2(struct qla_tgt *tgt)  {  	struct qla_hw_data *ha = tgt->ha; +	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);  	unsigned long flags;  	if (tgt->tgt_stopped) { -		ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f, +		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,  		    "Already in tgt->tgt_stopped state\n");  		dump_stack();  		return;  	} -	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b, +	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,  	    "Waiting for %d IRQ commands to complete (tgt %p)",  	    tgt->irq_cmd_count, tgt); -	mutex_lock(&ha->tgt.tgt_mutex); +	mutex_lock(&vha->vha_tgt.tgt_mutex);  	spin_lock_irqsave(&ha->hardware_lock, flags);  	while (tgt->irq_cmd_count != 0) {  		spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -867,9 +897,9 @@ void qlt_stop_phase2(struct qla_tgt *tgt)  	tgt->tgt_stop = 0;  	tgt->tgt_stopped = 1;  	spin_unlock_irqrestore(&ha->hardware_lock, flags); -	mutex_unlock(&ha->tgt.tgt_mutex); +	mutex_unlock(&vha->vha_tgt.tgt_mutex); -	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished", +	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",  	    tgt);  }  EXPORT_SYMBOL(qlt_stop_phase2); @@ -877,14 +907,14 @@ 
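
[Aside] The sess_del_work hunks above invert the delay handed to schedule_delayed_work(): when a session's expiry is still in the future, the old "jiffies - sess->expires" underflows to an enormous unsigned value, while "sess->expires - jiffies" is the remaining delay actually intended. A minimal userspace sketch of that arithmetic; HZ and the counter values are made up for illustration and are not taken from the driver:

#include <stdio.h>

#define HZ 1000UL	/* assumed tick rate for the example */

int main(void)
{
	unsigned long jiffies = 1000000UL;		/* "now" */
	unsigned long expires = jiffies + 5 * HZ;	/* expiry 5 s away */

	/* Old expression: underflows to a huge unsigned delay. */
	printf("jiffies - expires = %lu ticks\n", jiffies - expires);
	/* Fixed expression: the intended remaining delay. */
	printf("expires - jiffies = %lu ticks\n", expires - jiffies);
	return 0;
}
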
EXPORT_SYMBOL(qlt_stop_phase2);  /* Called from qlt_remove_target() -> qla2x00_remove_one() */  static void qlt_release(struct qla_tgt *tgt)  { -	struct qla_hw_data *ha = tgt->ha; +	scsi_qla_host_t *vha = tgt->vha; -	if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped) +	if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)  		qlt_stop_phase2(tgt); -	ha->tgt.qla_tgt = NULL; +	vha->vha_tgt.qla_tgt = NULL; -	ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d, +	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,  	    "Release of tgt %p finished\n", tgt);  	kfree(tgt); @@ -948,8 +978,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,  		return;  	} -	if (ha->tgt.qla_tgt != NULL) -		ha->tgt.qla_tgt->notify_ack_expected++; +	if (vha->vha_tgt.qla_tgt != NULL) +		vha->vha_tgt.qla_tgt->notify_ack_expected++;  	pkt->entry_type = NOTIFY_ACK_TYPE;  	pkt->entry_count = 1; @@ -1053,7 +1083,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,  		/* Other bytes are zero */  	} -	ha->tgt.qla_tgt->abts_resp_expected++; +	vha->vha_tgt.qla_tgt->abts_resp_expected++;  	qla2x00_start_iocbs(vha, vha->req);  } @@ -1098,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,  	ctio->u.status1.flags =  	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |  		CTIO7_FLAGS_TERMINATE); -	ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; +	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);  	qla2x00_start_iocbs(vha, vha->req); @@ -1205,7 +1235,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,  		    "qla_target(%d): task abort for non-existant session\n",  		    vha->vp_idx); -		rc = qlt_sched_sess_work(ha->tgt.qla_tgt, +		rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,  		    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));  		if (rc != 0) {  			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, @@ -1232,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,  {  	struct atio_from_isp *atio = &mcmd->orig_iocb.atio;  	struct ctio7_to_24xx *ctio; +	uint16_t temp;  	ql_dbg(ql_dbg_tgt, ha, 0xe008,  	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", @@ -1262,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,  	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |  	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |  		CTIO7_FLAGS_SEND_STATUS); -	ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); +	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); +	ctio->u.status1.ox_id = cpu_to_le16(temp);  	ctio->u.status1.scsi_status =  	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);  	ctio->u.status1.response_len = __constant_cpu_to_le16(8); @@ -1328,13 +1360,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)  	prm->cmd->sg_mapped = 1; -	/* -	 * If greater than four sg entries then we need to allocate -	 * the continuation entries -	 */ -	if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) -		prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - -		    prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont); +	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) { +		/* +		 * If greater than four sg entries then we need to allocate +		 * the continuation entries +		 */ +		if (prm->seg_cnt > prm->tgt->datasegs_per_cmd) +			prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt - +			prm->tgt->datasegs_per_cmd, +			prm->tgt->datasegs_per_cont); +	} else { +		/* DIF */ +		if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) || +		    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) { 
+			prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
+			prm->tot_dsds = prm->seg_cnt;
+		} else
+			prm->tot_dsds = prm->seg_cnt;
+
+		if (cmd->prot_sg_cnt) {
+			prm->prot_sg      = cmd->prot_sg;
+			prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
+				cmd->prot_sg, cmd->prot_sg_cnt,
+				cmd->dma_data_direction);
+			if (unlikely(prm->prot_seg_cnt == 0))
+				goto out_err;
+
+			if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+			    (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+				/* DIF bundling is not supported here */
+				prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
+								cmd->blk_sz);
+				prm->tot_dsds += prm->prot_seg_cnt;
+			} else
+				prm->tot_dsds += prm->prot_seg_cnt;
+		}
+	}

 	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
 	    prm->seg_cnt, prm->req_cnt);
@@ -1355,6 +1416,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
 	BUG_ON(!cmd->sg_mapped);
 	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
 	cmd->sg_mapped = 0;
+
+	if (cmd->prot_sg_cnt)
+		pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+			cmd->dma_data_direction);
+
+	if (cmd->ctx_dsd_alloced)
+		qla2x00_clean_dsd_pool(ha, NULL, cmd);
+
+	if (cmd->ctx)
+		dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
 }

 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
@@ -1444,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
 	struct ctio7_to_24xx *pkt;
 	struct qla_hw_data *ha = vha->hw;
 	struct atio_from_isp *atio = &prm->cmd->atio;
+	uint16_t temp;

 	pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
 	prm->pkt = pkt;
@@ -1472,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
 	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 	pkt->exchange_addr = atio->u.isp24.exchange_addr;
 	pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
-	pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	pkt->u.status0.ox_id = cpu_to_le16(temp);
 	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);

 	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
 	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
-	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
-	    le16_to_cpu(pkt->u.status0.ox_id));
+	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
 	return 0;
 }
@@ -1643,8 +1715,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
 	}

-	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
-	    vha->vp_idx, cmd->tag);
+	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
+		vha->vp_idx, cmd->tag,
+		be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

 	prm->cmd = cmd;
 	prm->tgt = tgt;
@@ -1880,6 +1953,328 @@ skip_explict_conf:
 	/* Sense with len > 24, is it possible ??? */
 }
+
+
+/* diff  */
+static inline int
+qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
+{
+	/*
+	 * Uncomment when corresponding SCSI changes are done.
+	 *
+	 if (!sp->cmd->prot_chk)
+	 return 0;
+	 *
+	 */
+	switch (se_cmd->prot_op) {
+	case TARGET_PROT_DOUT_INSERT:
+	case TARGET_PROT_DIN_STRIP:
+		if (ql2xenablehba_err_chk >= 1)
+			return 1;
+		break;
+	case TARGET_PROT_DOUT_PASS:
+	case TARGET_PROT_DIN_PASS:
+		if (ql2xenablehba_err_chk >= 2)
+			return 1;
+		break;
+	case TARGET_PROT_DIN_INSERT:
+	case TARGET_PROT_DOUT_STRIP:
+		return 1;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/*
+ * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
+ *
+ */
+static inline void
+qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+{
+	uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+
+	/* wait until Mode Sense/Select cmd, modepage Ah, subpage 2
+	 * have been implemented by TCM, before AppTag is available.
+	 * Look for modesense_handlers[]
+	 */
+	ctx->app_tag = 0;
+	ctx->app_tag_mask[0] = 0x0;
+	ctx->app_tag_mask[1] = 0x0;
+
+	switch (se_cmd->prot_type) {
+	case TARGET_DIF_TYPE0_PROT:
+		/*
+		 * No check for ql2xenablehba_err_chk, as it would be an
+		 * I/O error if hba tag generation is not done.
+		 */
+		ctx->ref_tag = cpu_to_le32(lba);
+
+		if (!qlt_hba_err_chk_enabled(se_cmd))
+			break;
+
+		/* enable ALL bytes of the ref tag */
+		ctx->ref_tag_mask[0] = 0xff;
+		ctx->ref_tag_mask[1] = 0xff;
+		ctx->ref_tag_mask[2] = 0xff;
+		ctx->ref_tag_mask[3] = 0xff;
+		break;
+	/*
+	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+	 * 16 bit app tag.
+	 */
+	case TARGET_DIF_TYPE1_PROT:
+		ctx->ref_tag = cpu_to_le32(lba);
+
+		if (!qlt_hba_err_chk_enabled(se_cmd))
+			break;
+
+		/* enable ALL bytes of the ref tag */
+		ctx->ref_tag_mask[0] = 0xff;
+		ctx->ref_tag_mask[1] = 0xff;
+		ctx->ref_tag_mask[2] = 0xff;
+		ctx->ref_tag_mask[3] = 0xff;
+		break;
+	/*
+	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
+	 * match LBA in CDB + N
+	 */
+	case TARGET_DIF_TYPE2_PROT:
+		ctx->ref_tag = cpu_to_le32(lba);
+
+		if (!qlt_hba_err_chk_enabled(se_cmd))
+			break;
+
+		/* enable ALL bytes of the ref tag */
+		ctx->ref_tag_mask[0] = 0xff;
+		ctx->ref_tag_mask[1] = 0xff;
+		ctx->ref_tag_mask[2] = 0xff;
+		ctx->ref_tag_mask[3] = 0xff;
+		break;
+
+	/* For Type 3 protection: 16 bit GUARD only */
+	case TARGET_DIF_TYPE3_PROT:
+		ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+			ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+		break;
+	}
+}
+
+
+static inline int
+qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
+{
+	uint32_t		*cur_dsd;
+	int			sgc;
+	uint32_t		transfer_length = 0;
+	uint32_t		data_bytes;
+	uint32_t		dif_bytes;
+	uint8_t			bundling = 1;
+	uint8_t			*clr_ptr;
+	struct crc_context	*crc_ctx_pkt = NULL;
+	struct qla_hw_data	*ha;
+	struct ctio_crc2_to_fw	*pkt;
+	dma_addr_t		crc_ctx_dma;
+	uint16_t		fw_prot_opts = 0;
+	struct qla_tgt_cmd	*cmd = prm->cmd;
+	struct se_cmd		*se_cmd = &cmd->se_cmd;
+	uint32_t h;
+	struct atio_from_isp *atio = &prm->cmd->atio;
+	uint16_t t16;
+
+	sgc = 0;
+	ha = vha->hw;
+
+	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
+	prm->pkt = pkt;
+	memset(pkt, 0, sizeof(*pkt));
+
+	ql_dbg(ql_dbg_tgt, vha, 0xe071,
+		"qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
+		vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
+		prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
+
+	if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
+	    (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
+		bundling = 0;
+
+	/* Compute dif len and adjust data len to include protection */
+	data_bytes = cmd->bufflen;
+	
dif_bytes  = (data_bytes / cmd->blk_sz) * 8; + +	switch (se_cmd->prot_op) { +	case TARGET_PROT_DIN_INSERT: +	case TARGET_PROT_DOUT_STRIP: +		transfer_length = data_bytes; +		data_bytes += dif_bytes; +		break; + +	case TARGET_PROT_DIN_STRIP: +	case TARGET_PROT_DOUT_INSERT: +	case TARGET_PROT_DIN_PASS: +	case TARGET_PROT_DOUT_PASS: +		transfer_length = data_bytes + dif_bytes; +		break; + +	default: +		BUG(); +		break; +	} + +	if (!qlt_hba_err_chk_enabled(se_cmd)) +		fw_prot_opts |= 0x10; /* Disable Guard tag checking */ +	/* HBA error checking enabled */ +	else if (IS_PI_UNINIT_CAPABLE(ha)) { +		if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || +		    (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) +			fw_prot_opts |= PO_DIS_VALD_APP_ESC; +		else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) +			fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; +	} + +	switch (se_cmd->prot_op) { +	case TARGET_PROT_DIN_INSERT: +	case TARGET_PROT_DOUT_INSERT: +		fw_prot_opts |= PO_MODE_DIF_INSERT; +		break; +	case TARGET_PROT_DIN_STRIP: +	case TARGET_PROT_DOUT_STRIP: +		fw_prot_opts |= PO_MODE_DIF_REMOVE; +		break; +	case TARGET_PROT_DIN_PASS: +	case TARGET_PROT_DOUT_PASS: +		fw_prot_opts |= PO_MODE_DIF_PASS; +		/* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */ +		break; +	default:/* Normal Request */ +		fw_prot_opts |= PO_MODE_DIF_PASS; +		break; +	} + + +	/* ---- PKT ---- */ +	/* Update entry type to indicate Command Type CRC_2 IOCB */ +	pkt->entry_type  = CTIO_CRC2; +	pkt->entry_count = 1; +	pkt->vp_index = vha->vp_idx; + +	h = qlt_make_handle(vha); +	if (unlikely(h == QLA_TGT_NULL_HANDLE)) { +		/* +		 * CTIO type 7 from the firmware doesn't provide a way to +		 * know the initiator's LOOP ID, hence we can't find +		 * the session and, so, the command. +		 */ +		return -EAGAIN; +	} else +		ha->tgt.cmds[h-1] = prm->cmd; + + +	pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK; +	pkt->nport_handle = prm->cmd->loop_id; +	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT); +	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; +	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; +	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; +	pkt->exchange_addr   = atio->u.isp24.exchange_addr; + +	/* silence compile warning */ +	t16 = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); +	pkt->ox_id  = cpu_to_le16(t16); + +	t16 = (atio->u.isp24.attr << 9); +	pkt->flags |= cpu_to_le16(t16); +	pkt->relative_offset = cpu_to_le32(prm->cmd->offset); + +	/* Set transfer direction */ +	if (cmd->dma_data_direction == DMA_TO_DEVICE) +		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN); +	else if (cmd->dma_data_direction == DMA_FROM_DEVICE) +		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT); + + +	pkt->dseg_count = prm->tot_dsds; +	/* Fibre channel byte count */ +	pkt->transfer_length = cpu_to_le32(transfer_length); + + +	/* ----- CRC context -------- */ + +	/* Allocate CRC context from global pool */ +	crc_ctx_pkt = cmd->ctx = +	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); + +	if (!crc_ctx_pkt) +		goto crc_queuing_error; + +	/* Zero out CTX area. 
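
[Aside] The dif_bytes calculation above assumes the standard T10 protection-information layout: one 8-byte tuple (16-bit guard CRC, 16-bit application tag, 32-bit reference tag) per logical block. A self-contained sketch of that layout and the overhead math; the struct name and example values are illustrative, not taken from the driver:

#include <stdint.h>
#include <stdio.h>

struct t10_pi_tuple {
	uint16_t guard;   /* CRC over the block data (big endian on the wire) */
	uint16_t app_tag; /* application tag */
	uint32_t ref_tag; /* reference tag, typically low 32 bits of the LBA */
};

int main(void)
{
	uint32_t data_bytes = 64 * 1024;	/* example transfer size */
	uint32_t blk_sz = 512;			/* example logical block size */
	/* 8 bytes of PI per block, mirroring (data_bytes / blk_sz) * 8 */
	uint32_t dif_bytes = (data_bytes / blk_sz) *
			     (uint32_t)sizeof(struct t10_pi_tuple);

	printf("%u blocks -> %u bytes of protection data\n",
	       data_bytes / blk_sz, dif_bytes);
	return 0;
}
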
*/
+	clr_ptr = (uint8_t *)crc_ctx_pkt;
+	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+	/* Set handle */
+	crc_ctx_pkt->handle = pkt->handle;
+
+	qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+
+	pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+	pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+	pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+
+	if (!bundling) {
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+	} else {
+		/*
+		 * Configure Bundling if we need to fetch interleaving
+		 * protection PCI accesses
+		 */
+		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+		crc_ctx_pkt->u.bundling.dseg_count =
+			cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+	}
+
+	/* Finish the common fields of CRC pkt */
+	crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
+	crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
+	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+
+
+	/* Walks data segments */
+	pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+
+	if (!bundling && prm->prot_seg_cnt) {
+		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
+			prm->tot_dsds, cmd))
+			goto crc_queuing_error;
+	} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
+		(prm->tot_dsds - prm->prot_seg_cnt), cmd))
+		goto crc_queuing_error;
+
+	if (bundling && prm->prot_seg_cnt) {
+		/* Walks dif segments */
+		pkt->add_flags |= CTIO_CRC2_AF_DIF_DSD_ENA;
+
+		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+		if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
+			prm->prot_seg_cnt, cmd))
+			goto crc_queuing_error;
+	}
+	return QLA_SUCCESS;
+
+crc_queuing_error:
+	/* Cleanup will be performed by the caller */
+
+	return QLA_FUNCTION_FAILED;
+}
+
+
 /*
  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -1899,9 +2294,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 	qlt_check_srr_debug(cmd, &xmit_type);

 	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
-	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
-	    "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
-	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+	    "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
+	    (xmit_type & QLA_TGT_XMIT_STATUS) ?
+	    1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
+	    &cmd->se_cmd);

 	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
 	    &full_req_cnt);
@@ -1919,7 +2315,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 	if (unlikely(res))
 		goto out_unmap_unlock;

-	res = qlt_24xx_build_ctio_pkt(&prm, vha);
+	if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
+		res = qlt_build_ctio_crc2_pkt(&prm, vha);
+	else
+		res = qlt_24xx_build_ctio_pkt(&prm, vha);
 	if (unlikely(res != 0))
 		goto out_unmap_unlock;

@@ -1931,7 +2330,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
 			CTIO7_FLAGS_STATUS_MODE_0);

-		qlt_load_data_segments(&prm, vha);
+		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+			qlt_load_data_segments(&prm, vha);

 		if (prm.add_status_pkt == 0) {
 			if (xmit_type & QLA_TGT_XMIT_STATUS) {
@@ -1961,8 +2361,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 			ql_dbg(ql_dbg_tgt, vha, 0xe019,
 			    "Building additional status packet\n");

+			/*
+			 * T10Dif: ctio_crc2_to_fw overlay on top of
+			 * ctio7_to_24xx
+			 */
 			memcpy(ctio, pkt, sizeof(*ctio));
+			/* reset back to CTIO7 */
 			ctio->entry_count = 1;
+			ctio->entry_type = CTIO_TYPE7;
 			ctio->dseg_count = 0;
 			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
 			    CTIO7_FLAGS_DATA_IN);
@@ -1971,6 +2377,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
 			pkt->u.status0.flags |= __constant_cpu_to_le16(
 			    CTIO7_FLAGS_DONT_RET_CTIO);
+
+			/* qlt_24xx_init_ctio_to_isp will correct
+			 * all necessary fields that are part of CTIO7.
+			 * There should be no residual of CTIO-CRC2 data.
+			 */
 			qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
 			    &prm);
 			pr_debug("Status CTIO7: %p\n", ctio);
@@ -2019,8 +2430,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
 		return -EIO;

-	ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
-	    (int)vha->vp_idx);
+	ql_dbg(ql_dbg_tgt, vha, 0xe01b,
+		"%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
+		__func__, (int)vha->vp_idx, &cmd->se_cmd,
+		be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));

 	/* Calculate number of entries and segments required */
 	if (qlt_pci_map_calc_cnt(&prm) != 0)
@@ -2032,14 +2445,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
 	if (res != 0)
 		goto out_unlock_free_unmap;
+	if (cmd->se_cmd.prot_op)
+		res = qlt_build_ctio_crc2_pkt(&prm, vha);
+	else
+		res = qlt_24xx_build_ctio_pkt(&prm, vha);

-	res = qlt_24xx_build_ctio_pkt(&prm, vha);
 	if (unlikely(res != 0))
 		goto out_unlock_free_unmap;

 	pkt = (struct ctio7_to_24xx *)prm.pkt;
 	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
 	    CTIO7_FLAGS_STATUS_MODE_0);
-	qlt_load_data_segments(&prm, vha);
+
+	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+		qlt_load_data_segments(&prm, vha);

 	cmd->state = QLA_TGT_STATE_NEED_DATA;

@@ -2057,6 +2475,143 @@ out_unlock_free_unmap:
 }
 EXPORT_SYMBOL(qlt_rdy_to_xfer);

+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA.
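
[Aside] The helper defined here compares two 8-byte DIF snapshots reported by the firmware, actual versus expected, laid out as guard (bytes 0-1), app tag (bytes 2-3) and ref tag (bytes 4-7), all big endian. A userspace sketch of that decoding, with ntohs()/ntohl() standing in for the kernel's be16_to_cpu()/be32_to_cpu(); the helper name and wire bytes are illustrative:

#include <arpa/inet.h>	/* ntohs/ntohl for the userspace sketch */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void decode_dif(const uint8_t *p, uint16_t *guard,
		       uint16_t *app, uint32_t *ref)
{
	uint16_t g, a;
	uint32_t r;

	/* memcpy avoids unaligned loads from the raw 8-byte field */
	memcpy(&g, p + 0, 2);
	memcpy(&a, p + 2, 2);
	memcpy(&r, p + 4, 4);
	*guard = ntohs(g);
	*app   = ntohs(a);
	*ref   = ntohl(r);
}

int main(void)
{
	/* example wire bytes: guard 0x1234, app 0xffff, ref 0x00000010 */
	uint8_t wire[8] = { 0x12, 0x34, 0xff, 0xff, 0x00, 0x00, 0x00, 0x10 };
	uint16_t guard, app;
	uint32_t ref;

	decode_dif(wire, &guard, &app, &ref);
	printf("guard %#x app %#x ref %#x\n",
	       (unsigned)guard, (unsigned)app, (unsigned)ref);
	return 0;
}
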
+ */ +static inline int +qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, +		struct ctio_crc_from_fw *sts) +{ +	uint8_t		*ap = &sts->actual_dif[0]; +	uint8_t		*ep = &sts->expected_dif[0]; +	uint32_t	e_ref_tag, a_ref_tag; +	uint16_t	e_app_tag, a_app_tag; +	uint16_t	e_guard, a_guard; +	uint64_t	lba = cmd->se_cmd.t_task_lba; + +	a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0)); +	a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); +	a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); + +	e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0)); +	e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); +	e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); + +	ql_dbg(ql_dbg_tgt, vha, 0xe075, +	    "iocb(s) %p Returned STATUS.\n", sts); + +	ql_dbg(ql_dbg_tgt, vha, 0xf075, +	    "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", +	    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, +	    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); + +	/* +	 * Ignore sector if: +	 * For type     3: ref & app tag is all 'f's +	 * For type 0,1,2: app tag is all 'f's +	 */ +	if ((a_app_tag == 0xffff) && +	    ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || +	     (a_ref_tag == 0xffffffff))) { +		uint32_t blocks_done; + +		/* 2TB boundary case covered automatically with this */ +		blocks_done = e_ref_tag - (uint32_t)lba + 1; +		cmd->se_cmd.bad_sector = e_ref_tag; +		cmd->se_cmd.pi_err = 0; +		ql_dbg(ql_dbg_tgt, vha, 0xf074, +			"need to return scsi good\n"); + +		/* Update protection tag */ +		if (cmd->prot_sg_cnt) { +			uint32_t i, j = 0, k = 0, num_ent; +			struct scatterlist *sg, *sgl; + + +			sgl = cmd->prot_sg; + +			/* Patch the corresponding protection tags */ +			for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { +				num_ent = sg_dma_len(sg) / 8; +				if (k + num_ent < blocks_done) { +					k += num_ent; +					continue; +				} +				j = blocks_done - k - 1; +				k = blocks_done; +				break; +			} + +			if (k != blocks_done) { +				ql_log(ql_log_warn, vha, 0xf076, +				    "unexpected tag values tag:lba=%u:%llu)\n", +				    e_ref_tag, (unsigned long long)lba); +				goto out; +			} + +#if 0 +			struct sd_dif_tuple *spt; +			/* TODO: +			 * This section came from initiator. Is it valid here? +			 * should ulp be override with actual val??? 
+			 */
+			spt = page_address(sg_page(sg)) + sg->offset;
+			spt += j;
+
+			spt->app_tag = 0xffff;
+			if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
+				spt->ref_tag = 0xffffffff;
+#endif
+		}
+
+		return 0;
+	}
+
+	/* check guard */
+	if (e_guard != a_guard) {
+		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+		ql_log(ql_log_warn, vha, 0xe076,
+		    "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+		    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+		    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+		    a_guard, e_guard, cmd);
+		goto out;
+	}
+
+	/* check ref tag */
+	if (e_ref_tag != a_ref_tag) {
+		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+		cmd->se_cmd.bad_sector = e_ref_tag;
+
+		ql_log(ql_log_warn, vha, 0xe077,
+			"Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+			cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+			a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+			a_guard, e_guard, cmd);
+		goto out;
+	}
+
+	/* check app tag */
+	if (e_app_tag != a_app_tag) {
+		cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+		cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+		ql_log(ql_log_warn, vha, 0xe078,
+			"App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+			cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+			a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+			a_guard, e_guard, cmd);
+		goto out;
+	}
+out:
+	return 1;
+}
+
+
 /* If hardware_lock held on entry, might drop it, then reacquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
@@ -2067,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 	struct qla_hw_data *ha = vha->hw;
 	request_t *pkt;
 	int ret = 0;
+	uint16_t temp;

 	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);

@@ -2103,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
 	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
 		CTIO7_FLAGS_TERMINATE);
-	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+	ctio24->u.status1.ox_id = cpu_to_le16(temp);

 	/* Most likely, it isn't needed */
 	ctio24->u.status1.residual = get_unaligned((uint32_t *)
@@ -2133,21 +2690,46 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
 	rc = __qlt_send_term_exchange(vha, cmd, atio);
 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 done:
-	if (rc == 1) {
+	/*
+	 * Terminate exchange will tell fw to release any active CTIO
+	 * that's in FW possession and clean up the exchange.
+	 *
+	 * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
+	 * down at FW.  Free the cmd when the CTIO comes back later
+	 * w/aborted(0x2) status.
+	 *
+	 * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
+	 * back w/some err.  Free the cmd now.
+	 */ +	if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {  		if (!ha_locked && !in_interrupt())  			msleep(250); /* just in case */ +		if (cmd->sg_mapped) +			qlt_unmap_sg(vha, cmd);  		vha->hw->tgt.tgt_ops->free_cmd(cmd);  	} +	return;  }  void qlt_free_cmd(struct qla_tgt_cmd *cmd)  { -	BUG_ON(cmd->sg_mapped); +	struct qla_tgt_sess *sess = cmd->sess; +	ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, +	    "%s: se_cmd[%p] ox_id %04x\n", +	    __func__, &cmd->se_cmd, +	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); + +	BUG_ON(cmd->sg_mapped);  	if (unlikely(cmd->free_sg))  		kfree(cmd->sg); -	kmem_cache_free(qla_tgt_cmd_cachep, cmd); + +	if (!sess || !sess->se_sess) { +		WARN_ON(1); +		return; +	} +	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);  }  EXPORT_SYMBOL(qlt_free_cmd); @@ -2156,8 +2738,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,  	struct qla_tgt_cmd *cmd, void *ctio)  {  	struct qla_tgt_srr_ctio *sc; -	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	struct qla_tgt_srr_imm *imm;  	tgt->ctio_srr_id++; @@ -2353,6 +2934,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,  		case CTIO_LIP_RESET:  		case CTIO_TARGET_RESET:  		case CTIO_ABORTED: +			/* driver request abort via Terminate exchange */  		case CTIO_TIMEOUT:  		case CTIO_INVALID_RX_ID:  			/* They are OK */ @@ -2383,18 +2965,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,  			else  				return; +		case CTIO_DIF_ERROR: { +			struct ctio_crc_from_fw *crc = +				(struct ctio_crc_from_fw *)ctio; +			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, +			    "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", +			    vha->vp_idx, status, cmd->state, se_cmd, +			    *((u64 *)&crc->actual_dif[0]), +			    *((u64 *)&crc->expected_dif[0])); + +			if (qlt_handle_dif_error(vha, cmd, ctio)) { +				if (cmd->state == QLA_TGT_STATE_NEED_DATA) { +					/* scsi Write/xfer rdy complete */ +					goto skip_term; +				} else { +					/* scsi read/xmit respond complete +					 * call handle dif to send scsi status +					 * rather than terminate exchange. +					 */ +					cmd->state = QLA_TGT_STATE_PROCESSED; +					ha->tgt.tgt_ops->handle_dif_err(cmd); +					return; +				} +			} else { +				/* Need to generate a SCSI good completion. +				 * because FW did not send scsi status. +				 */ +				status = 0; +				goto skip_term; +			} +			break; +		}  		default:  			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, -			    "qla_target(%d): CTIO with error status " -			    "0x%x received (state %x, se_cmd %p\n", +			    "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",  			    vha->vp_idx, status, cmd->state, se_cmd);  			break;  		} -		if (cmd->state != QLA_TGT_STATE_NEED_DATA) + +		/* "cmd->state == QLA_TGT_STATE_ABORTED" means +		 * cmd is already aborted/terminated, we don't +		 * need to terminate again.  The exchange is already +		 * cleaned up/freed at FW level.  Just cleanup at driver +		 * level. 
+		 */ +		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && +			(cmd->state != QLA_TGT_STATE_ABORTED)) {  			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))  				return; +		}  	} +skip_term:  	if (cmd->state == QLA_TGT_STATE_PROCESSED) {  		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); @@ -2423,7 +3045,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,  		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);  	} -	if (unlikely(status != CTIO_SUCCESS)) { +	if (unlikely(status != CTIO_SUCCESS) && +		(cmd->state != QLA_TGT_STATE_ABORTED)) {  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");  		dump_stack();  	} @@ -2468,13 +3091,12 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,  /*   * Process context for I/O path into tcm_qla2xxx code   */ -static void qlt_do_work(struct work_struct *work) +static void __qlt_do_work(struct qla_tgt_cmd *cmd)  { -	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);  	scsi_qla_host_t *vha = cmd->vha;  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; -	struct qla_tgt_sess *sess = NULL; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; +	struct qla_tgt_sess *sess = cmd->sess;  	struct atio_from_isp *atio = &cmd->atio;  	unsigned char *cdb;  	unsigned long flags; @@ -2484,41 +3106,6 @@ static void qlt_do_work(struct work_struct *work)  	if (tgt->tgt_stop)  		goto out_term; -	spin_lock_irqsave(&ha->hardware_lock, flags); -	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, -	    atio->u.isp24.fcp_hdr.s_id); -	/* Do kref_get() before dropping qla_hw_data->hardware_lock. */ -	if (sess) -		kref_get(&sess->se_sess->sess_kref); -	spin_unlock_irqrestore(&ha->hardware_lock, flags); - -	if (unlikely(!sess)) { -		uint8_t *s_id =	atio->u.isp24.fcp_hdr.s_id; - -		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, -			"qla_target(%d): Unable to find wwn login" -			" (s_id %x:%x:%x), trying to create it manually\n", -			vha->vp_idx, s_id[0], s_id[1], s_id[2]); - -		if (atio->u.raw.entry_count > 1) { -			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, -				"Dropping multy entry cmd %p\n", cmd); -			goto out_term; -		} - -		mutex_lock(&ha->tgt.tgt_mutex); -		sess = qlt_make_local_sess(vha, s_id); -		/* sess has an extra creation ref. 
*/
-		mutex_unlock(&ha->tgt.tgt_mutex);
-
-		if (!sess)
-			goto out_term;
-	}
-
-	cmd->sess = sess;
-	cmd->loop_id = sess->loop_id;
-	cmd->conf_compl_supported = sess->conf_compl_supported;
-
 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
 	cmd->tag = atio->u.isp24.exchange_addr;
 	cmd->unpacked_lun = scsilun_to_int(
@@ -2542,11 +3129,12 @@ static void qlt_do_work(struct work_struct *work)
 	    atio->u.isp24.fcp_cmnd.add_cdb_len]));

 	ql_dbg(ql_dbg_tgt, vha, 0xe022,
-	    "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
-	    cmd, cmd->unpacked_lun, cmd->tag);
+		"qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
+		cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
+		cmd->atio.u.isp24.fcp_hdr.ox_id);

-	ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
-	    fcp_task_attr, data_dir, bidi);
+	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+				          fcp_task_attr, data_dir, bidi);
 	if (ret != 0)
 		goto out_term;
 	/*
@@ -2565,10 +3153,105 @@ out_term:
 	 */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
-	kmem_cache_free(qla_tgt_cmd_cachep, cmd);
-	if (sess)
+	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
+	ha->tgt.tgt_ops->put_sess(sess);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_do_work(struct work_struct *work)
+{
+	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+	__qlt_do_work(cmd);
+}
+
+static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
+				       struct qla_tgt_sess *sess,
+				       struct atio_from_isp *atio)
+{
+	struct se_session *se_sess = sess->se_sess;
+	struct qla_tgt_cmd *cmd;
+	int tag;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		return NULL;
+
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+	cmd->state = QLA_TGT_STATE_NEW;
+	cmd->tgt = vha->vha_tgt.qla_tgt;
+	cmd->vha = vha;
+	cmd->se_cmd.map_tag = tag;
+	cmd->sess = sess;
+	cmd->loop_id = sess->loop_id;
+	cmd->conf_compl_supported = sess->conf_compl_supported;
+
+	return cmd;
+}
+
+static void qlt_send_busy(struct scsi_qla_host *, struct atio_from_isp *,
+			  uint16_t);
+
+static void qlt_create_sess_from_atio(struct work_struct *work)
+{
+	struct qla_tgt_sess_op *op = container_of(work,
+					struct qla_tgt_sess_op, work);
+	scsi_qla_host_t *vha = op->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+		"qla_target(%d): Unable to find wwn login"
+		" (s_id %x:%x:%x), trying to create it manually\n",
+		vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+	if (op->atio.u.raw.entry_count > 1) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+		        "Dropping multi entry atio %p\n", &op->atio);
+		goto out_term;
+	}
+
+	mutex_lock(&vha->vha_tgt.tgt_mutex);
+	sess = qlt_make_local_sess(vha, s_id);
+	/* sess has an extra creation ref. */
+	mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+	if (!sess)
+		goto out_term;
+	/*
+	 * Now obtain a pre-allocated session tag using the original op->atio
+	 * packet header, and dispatch into __qlt_do_work() using the existing
+	 * process context.
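
[Aside] qlt_get_tag() above replaces the old per-command kmem_cache allocation with indexes into a pre-sized per-session command map, handed out by the kernel's percpu_ida allocator and returned via percpu_ida_free() in qlt_free_cmd(). A userspace stand-in for the same pattern, using a trivial bitmap allocator in place of percpu_ida; all names and sizes here are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POOL_SIZE 64	/* stands in for the session tag pool depth */

struct cmd {		/* stands in for struct qla_tgt_cmd */
	int tag;
	char payload[32];
};

static struct cmd cmd_map[POOL_SIZE];	/* pre-allocated, like sess_cmd_map */
static uint64_t tag_bitmap;		/* stands in for percpu_ida state */

static int tag_alloc(void)
{
	for (int tag = 0; tag < POOL_SIZE; tag++)
		if (!(tag_bitmap & (1ULL << tag))) {
			tag_bitmap |= 1ULL << tag;
			return tag;
		}
	return -1;	/* pool exhausted; caller answers with BUSY */
}

static void tag_free(int tag)
{
	tag_bitmap &= ~(1ULL << tag);
}

int main(void)
{
	int tag = tag_alloc();
	struct cmd *cmd;

	if (tag < 0)
		return 1;
	cmd = &cmd_map[tag];
	memset(cmd, 0, sizeof(*cmd));	/* slots are reused, so re-zero */
	cmd->tag = tag;
	printf("got tag %d\n", cmd->tag);
	tag_free(tag);
	return 0;
}
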
+	 */ +	cmd = qlt_get_tag(vha, sess, &op->atio); +	if (!cmd) { +		spin_lock_irqsave(&ha->hardware_lock, flags); +		qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);  		ha->tgt.tgt_ops->put_sess(sess); +		spin_unlock_irqrestore(&ha->hardware_lock, flags); +		kfree(op); +		return; +	} +	/* +	 * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release +	 * the extra reference taken above by qlt_make_local_sess() +	 */ +	__qlt_do_work(cmd); +	kfree(op); +	return; + +out_term: +	spin_lock_irqsave(&ha->hardware_lock, flags); +	qlt_send_term_exchange(vha, NULL, &op->atio, 1);  	spin_unlock_irqrestore(&ha->hardware_lock, flags); +	kfree(op); +  }  /* ha->hardware_lock supposed to be held on entry */ @@ -2576,7 +3259,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,  	struct atio_from_isp *atio)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; +	struct qla_tgt_sess *sess;  	struct qla_tgt_cmd *cmd;  	if (unlikely(tgt->tgt_stop)) { @@ -2585,20 +3269,31 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,  		return -EFAULT;  	} -	cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC); +	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id); +	if (unlikely(!sess)) { +		struct qla_tgt_sess_op *op = kzalloc(sizeof(struct qla_tgt_sess_op), +						     GFP_ATOMIC); +		if (!op) +			return -ENOMEM; + +		memcpy(&op->atio, atio, sizeof(*atio)); +		INIT_WORK(&op->work, qlt_create_sess_from_atio); +		queue_work(qla_tgt_wq, &op->work); +		return 0; +	} +	/* +	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. +	 */ +	kref_get(&sess->se_sess->sess_kref); + +	cmd = qlt_get_tag(vha, sess, atio);  	if (!cmd) {  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,  		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); +		ha->tgt.tgt_ops->put_sess(sess);  		return -ENOMEM;  	} -	INIT_LIST_HEAD(&cmd->cmd_list); - -	memcpy(&cmd->atio, atio, sizeof(*atio)); -	cmd->state = QLA_TGT_STATE_NEW; -	cmd->tgt = ha->tgt.qla_tgt; -	cmd->vha = vha; -  	INIT_WORK(&cmd->work, qlt_do_work);  	queue_work(qla_tgt_wq, &cmd->work);  	return 0; @@ -2722,7 +3417,7 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)  	uint32_t lun, unpacked_lun;  	int lun_size, fn; -	tgt = ha->tgt.qla_tgt; +	tgt = vha->vha_tgt.qla_tgt;  	lun = a->u.isp24.fcp_cmnd.lun;  	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun); @@ -2796,7 +3491,7 @@ static int qlt_abort_task(struct scsi_qla_host *vha,  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,  		    "qla_target(%d): task abort for unexisting "  		    "session\n", vha->vp_idx); -		return qlt_sched_sess_work(ha->tgt.qla_tgt, +		return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,  		    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));  	} @@ -2809,7 +3504,6 @@ static int qlt_abort_task(struct scsi_qla_host *vha,  static int qlt_24xx_handle_els(struct scsi_qla_host *vha,  	struct imm_ntfy_from_isp *iocb)  { -	struct qla_hw_data *ha = vha->hw;  	int res = 0;  	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, @@ -2827,7 +3521,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,  	case ELS_PDISC:  	case ELS_ADISC:  	{ -		struct qla_tgt *tgt = ha->tgt.qla_tgt; +		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  		if (tgt->link_reinit_iocb_pending) {  			qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,  			    0, 0, 0, 0, 0, 0); @@ -3185,7 +3879,8 @@ restart:  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,  		    "SRR cmd %p (se_cmd %p, tag %d, op %x), "  		    "sg_cnt=%d, offset=%d", 
cmd, &cmd->se_cmd, cmd->tag, -		    se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset); +		    se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0, +		    cmd->sg_cnt, cmd->offset);  		qlt_handle_srr(vha, sctio, imm); @@ -3201,8 +3896,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,  	struct imm_ntfy_from_isp *iocb)  {  	struct qla_tgt_srr_imm *imm; -	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	struct qla_tgt_srr_ctio *sctio;  	tgt->imm_srr_id++; @@ -3312,7 +4006,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,  	case IMM_NTFY_LIP_LINK_REINIT:  	{ -		struct qla_tgt *tgt = ha->tgt.qla_tgt; +		struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,  		    "qla_target(%d): LINK REINIT (loop %#x, "  		    "subcode %x)\n", vha->vp_idx, @@ -3488,7 +4182,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,  	struct atio_from_isp *atio)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	int rc;  	if (unlikely(tgt == NULL)) { @@ -3510,11 +4204,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,  	switch (atio->u.raw.entry_type) {  	case ATIO_TYPE7:  		ql_dbg(ql_dbg_tgt, vha, 0xe02d, -		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, " -		    "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n", +		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",  		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,  		    atio->u.isp24.fcp_cmnd.rddata,  		    atio->u.isp24.fcp_cmnd.wrdata, +		    atio->u.isp24.fcp_cmnd.cdb[0],  		    atio->u.isp24.fcp_cmnd.add_cdb_len,  		    be32_to_cpu(get_unaligned((uint32_t *)  			&atio->u.isp24.fcp_cmnd.add_cdb[ @@ -3590,7 +4284,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,  static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	if (unlikely(tgt == NULL)) {  		ql_dbg(ql_dbg_tgt, vha, 0xe05d, @@ -3612,11 +4306,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)  	tgt->irq_cmd_count++;  	switch (pkt->entry_type) { +	case CTIO_CRC2:  	case CTIO_TYPE7:  	{  		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; -		ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n", -		    vha->vp_idx); +		ql_dbg(ql_dbg_tgt, vha, 0xe030, +			"CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n", +			entry->entry_type, vha->vp_idx);  		qlt_do_ctio_completion(vha, entry->handle,  		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),  		    entry); @@ -3793,7 +4489,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,  	uint16_t *mailbox)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	int login_code;  	ql_dbg(ql_dbg_tgt, vha, 0xe039, @@ -3923,14 +4619,14 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,  static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,  	uint8_t *s_id)  { -	struct qla_hw_data *ha = vha->hw;  	struct qla_tgt_sess *sess = NULL;  	fc_port_t *fcport = NULL;  	int rc, global_resets;  	uint16_t loop_id = 0;  retry: -	global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count); +	global_resets = +	    
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);  	rc = qla24xx_get_loop_id(vha, s_id, &loop_id);  	if (rc != 0) { @@ -3957,12 +4653,13 @@ retry:  		return NULL;  	if (global_resets != -	    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) { +	    atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {  		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,  		    "qla_target(%d): global reset during session discovery "  		    "(counter was %d, new %d), retrying", vha->vp_idx,  		    global_resets, -		    atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)); +		    atomic_read(&vha->vha_tgt. +			qla_tgt->tgt_global_resets_count));  		goto retry;  	} @@ -3997,10 +4694,10 @@ static void qlt_abort_work(struct qla_tgt *tgt,  	if (!sess) {  		spin_unlock_irqrestore(&ha->hardware_lock, flags); -		mutex_lock(&ha->tgt.tgt_mutex); +		mutex_lock(&vha->vha_tgt.tgt_mutex);  		sess = qlt_make_local_sess(vha, s_id);  		/* sess has got an extra creation ref */ -		mutex_unlock(&ha->tgt.tgt_mutex); +		mutex_unlock(&vha->vha_tgt.tgt_mutex);  		spin_lock_irqsave(&ha->hardware_lock, flags);  		if (!sess) @@ -4051,10 +4748,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,  	if (!sess) {  		spin_unlock_irqrestore(&ha->hardware_lock, flags); -		mutex_lock(&ha->tgt.tgt_mutex); +		mutex_lock(&vha->vha_tgt.tgt_mutex);  		sess = qlt_make_local_sess(vha, s_id);  		/* sess has got an extra creation ref */ -		mutex_unlock(&ha->tgt.tgt_mutex); +		mutex_unlock(&vha->vha_tgt.tgt_mutex);  		spin_lock_irqsave(&ha->hardware_lock, flags);  		if (!sess) @@ -4140,9 +4837,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)  	}  	ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, -	    "Registering target for host %ld(%p)", base_vha->host_no, ha); +	    "Registering target for host %ld(%p).\n", base_vha->host_no, ha); -	BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL)); +	BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);  	tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);  	if (!tgt) { @@ -4170,7 +4867,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)  	INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);  	atomic_set(&tgt->tgt_global_resets_count, 0); -	ha->tgt.qla_tgt = tgt; +	base_vha->vha_tgt.qla_tgt = tgt;  	ql_dbg(ql_dbg_tgt, base_vha, 0xe067,  		"qla_target(%d): using 64 Bit PCI addressing", @@ -4181,6 +4878,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)  	tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;  	tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; +	if (base_vha->fc_vport) +		return 0; +  	mutex_lock(&qla_tgt_mutex);  	list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);  	mutex_unlock(&qla_tgt_mutex); @@ -4191,16 +4891,20 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)  /* Must be called under tgt_host_action_mutex */  int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)  { -	if (!ha->tgt.qla_tgt) +	if (!vha->vha_tgt.qla_tgt)  		return 0; +	if (vha->fc_vport) { +		qlt_release(vha->vha_tgt.qla_tgt); +		return 0; +	}  	mutex_lock(&qla_tgt_mutex); -	list_del(&ha->tgt.qla_tgt->tgt_list_entry); +	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);  	mutex_unlock(&qla_tgt_mutex);  	ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",  	    vha->host_no, ha); -	qlt_release(ha->tgt.qla_tgt); +	qlt_release(vha->vha_tgt.qla_tgt);  	return 0;  } @@ -4234,8 +4938,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,   * @callback:  lport 
initialization callback for tcm_qla2xxx code   * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data   */ -int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn, -	int (*callback)(struct scsi_qla_host *), void *target_lport_ptr) +int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn, +		       u64 npiv_wwpn, u64 npiv_wwnn, +		       int (*callback)(struct scsi_qla_host *, void *, u64, u64))  {  	struct qla_tgt *tgt;  	struct scsi_qla_host *vha; @@ -4254,19 +4959,22 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,  		if (!host)  			continue; -		if (ha->tgt.tgt_ops != NULL) -			continue; -  		if (!(host->hostt->supported_mode & MODE_TARGET))  			continue;  		spin_lock_irqsave(&ha->hardware_lock, flags); -		if (host->active_mode & MODE_TARGET) { +		if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {  			pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",  			    host->host_no);  			spin_unlock_irqrestore(&ha->hardware_lock, flags);  			continue;  		} +		if (tgt->tgt_stop) { +			pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n", +				 host->host_no); +			spin_unlock_irqrestore(&ha->hardware_lock, flags); +			continue; +		}  		spin_unlock_irqrestore(&ha->hardware_lock, flags);  		if (!scsi_host_get(host)) { @@ -4275,22 +4983,16 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,  			    " qla2xxx scsi_host\n");  			continue;  		} -		qlt_lport_dump(vha, wwpn, b); +		qlt_lport_dump(vha, phys_wwpn, b);  		if (memcmp(vha->port_name, b, WWN_SIZE)) {  			scsi_host_put(host);  			continue;  		} -		/* -		 * Setup passed parameters ahead of invoking callback -		 */ -		ha->tgt.tgt_ops = qla_tgt_ops; -		ha->tgt.target_lport_ptr = target_lport_ptr; -		rc = (*callback)(vha); -		if (rc != 0) { -			ha->tgt.tgt_ops = NULL; -			ha->tgt.target_lport_ptr = NULL; -		} +		rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn); +		if (rc != 0) +			scsi_host_put(host); +  		mutex_unlock(&qla_tgt_mutex);  		return rc;  	} @@ -4312,7 +5014,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)  	/*  	 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data  	 */ -	ha->tgt.target_lport_ptr = NULL; +	vha->vha_tgt.target_lport_ptr = NULL;  	ha->tgt.tgt_ops = NULL;  	/*  	 * Release the Scsi_Host reference for the underlying qla2xxx host @@ -4374,8 +5076,9 @@ void  qlt_enable_vha(struct scsi_qla_host *vha)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	unsigned long flags; +	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);  	if (!tgt) {  		ql_dbg(ql_dbg_tgt, vha, 0xe069, @@ -4390,9 +5093,14 @@ qlt_enable_vha(struct scsi_qla_host *vha)  	qlt_set_mode(vha);  	spin_unlock_irqrestore(&ha->hardware_lock, flags); -	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); -	qla2xxx_wake_dpc(vha); -	qla2x00_wait_for_hba_online(vha); +	if (vha->vp_idx) { +		qla24xx_disable_vp(vha); +		qla24xx_enable_vp(vha); +	} else { +		set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); +		qla2xxx_wake_dpc(base_vha); +		qla2x00_wait_for_hba_online(base_vha); +	}  }  EXPORT_SYMBOL(qlt_enable_vha); @@ -4405,7 +5113,7 @@ void  qlt_disable_vha(struct scsi_qla_host *vha)  {  	struct qla_hw_data *ha = vha->hw; -	struct qla_tgt *tgt = ha->tgt.qla_tgt; +	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;  	unsigned long flags;  	if (!tgt) { @@ -4436,8 +5144,10 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)  	if 
(!qla_tgt_mode_enabled(vha))  		return; -	mutex_init(&ha->tgt.tgt_mutex); -	mutex_init(&ha->tgt.tgt_host_action_mutex); +	vha->vha_tgt.qla_tgt = NULL; + +	mutex_init(&vha->vha_tgt.tgt_mutex); +	mutex_init(&vha->vha_tgt.tgt_host_action_mutex);  	qlt_clear_mode(vha); @@ -4448,6 +5158,8 @@ qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)  	 * assigning the value appropriately.  	 */  	ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + +	qlt_add_target(ha, vha);  }  void @@ -4735,6 +5447,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha,  	case ABTS_RESP_24XX:  	case CTIO_TYPE7:  	case NOTIFY_ACK_TYPE: +	case CTIO_CRC2:  		return 1;  	default:  		return 0; @@ -4766,8 +5479,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)  		ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;  	} -	mutex_init(&ha->tgt.tgt_mutex); -	mutex_init(&ha->tgt.tgt_host_action_mutex); +	mutex_init(&base_vha->vha_tgt.tgt_mutex); +	mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);  	qlt_clear_mode(base_vha);  } @@ -4878,23 +5591,13 @@ int __init qlt_init(void)  	if (!QLA_TGT_MODE_ENABLED())  		return 0; -	qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep", -	    sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0, -	    NULL); -	if (!qla_tgt_cmd_cachep) { -		ql_log(ql_log_fatal, NULL, 0xe06c, -		    "kmem_cache_create for qla_tgt_cmd_cachep failed\n"); -		return -ENOMEM; -	} -  	qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",  	    sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct  	    qla_tgt_mgmt_cmd), 0, NULL);  	if (!qla_tgt_mgmt_cmd_cachep) {  		ql_log(ql_log_fatal, NULL, 0xe06d,  		    "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n"); -		ret = -ENOMEM; -		goto out; +		return -ENOMEM;  	}  	qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab, @@ -4922,8 +5625,6 @@ out_cmd_mempool:  	mempool_destroy(qla_tgt_mgmt_cmd_mempool);  out_mgmt_cmd_cachep:  	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); -out: -	kmem_cache_destroy(qla_tgt_cmd_cachep);  	return ret;  } @@ -4935,5 +5636,4 @@ void qlt_exit(void)  	destroy_workqueue(qla_tgt_wq);  	mempool_destroy(qla_tgt_mgmt_cmd_mempool);  	kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep); -	kmem_cache_destroy(qla_tgt_cmd_cachep);  } diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index b33e411f28a..d1d24fb0160 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -293,6 +293,7 @@ struct ctio_to_2xxx {  #define CTIO_ABORTED			0x02  #define CTIO_INVALID_RX_ID		0x08  #define CTIO_TIMEOUT			0x0B +#define CTIO_DIF_ERROR			0x0C     /* DIF error detected  */  #define CTIO_LIP_RESET			0x0E  #define CTIO_TARGET_RESET		0x17  #define CTIO_PORT_UNAVAILABLE		0x28 @@ -315,7 +316,7 @@ struct fcp_hdr {  	uint8_t  seq_id;  	uint8_t  df_ctl;  	uint16_t seq_cnt; -	uint16_t ox_id; +	__be16   ox_id;  	uint16_t rx_id;  	uint32_t parameter;  } __packed; @@ -440,9 +441,9 @@ struct ctio7_to_24xx {  	union {  		struct {  			uint16_t reserved1; -			uint16_t flags; +			__le16 flags;  			uint32_t residual; -			uint16_t ox_id; +			__le16 ox_id;  			uint16_t scsi_status;  			uint32_t relative_offset;  			uint32_t reserved2; @@ -457,7 +458,7 @@ struct ctio7_to_24xx {  			uint16_t sense_length;  			uint16_t flags;  			uint32_t residual; -			uint16_t ox_id; +			__le16 ox_id;  			uint16_t scsi_status;  			uint16_t response_len;  			uint16_t reserved; @@ -498,11 +499,12 @@ struct ctio7_from_24xx {  #define 
CTIO7_FLAGS_DONT_RET_CTIO	BIT_8  #define CTIO7_FLAGS_STATUS_MODE_0	0  #define CTIO7_FLAGS_STATUS_MODE_1	BIT_6 +#define CTIO7_FLAGS_STATUS_MODE_2	BIT_7  #define CTIO7_FLAGS_EXPLICIT_CONFORM	BIT_5  #define CTIO7_FLAGS_CONFIRM_SATISF	BIT_4  #define CTIO7_FLAGS_DSD_PTR		BIT_2 -#define CTIO7_FLAGS_DATA_IN		BIT_1 -#define CTIO7_FLAGS_DATA_OUT		BIT_0 +#define CTIO7_FLAGS_DATA_IN		BIT_1 /* data to initiator */ +#define CTIO7_FLAGS_DATA_OUT		BIT_0 /* data from initiator */  #define ELS_PLOGI			0x3  #define ELS_FLOGI			0x4 @@ -514,6 +516,68 @@ struct ctio7_from_24xx {  #define ELS_ADISC			0x52  /* + *CTIO Type CRC_2 IOCB + */ +struct ctio_crc2_to_fw { +	uint8_t entry_type;		/* Entry type. */ +#define CTIO_CRC2 0x7A +	uint8_t entry_count;		/* Entry count. */ +	uint8_t sys_define;		/* System defined. */ +	uint8_t entry_status;		/* Entry Status. */ + +	uint32_t handle;		/* System handle. */ +	uint16_t nport_handle;		/* N_PORT handle. */ +	__le16 timeout;		/* Command timeout. */ + +	uint16_t dseg_count;		/* Data segment count. */ +	uint8_t  vp_index; +	uint8_t  add_flags;		/* additional flags */ +#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 + +	uint8_t  initiator_id[3];	/* initiator ID */ +	uint8_t  reserved1; +	uint32_t exchange_addr;		/* rcv exchange address */ +	uint16_t reserved2; +	__le16 flags;			/* refer to CTIO7 flags values */ +	uint32_t residual; +	__le16 ox_id; +	uint16_t scsi_status; +	__le32 relative_offset; +	uint32_t reserved5; +	__le32 transfer_length;		/* total fc transfer length */ +	uint32_t reserved6; +	__le32 crc_context_address[2];/* Data segment address. */ +	uint16_t crc_context_len;	/* Data segment length. */ +	uint16_t reserved_1;		/* MUST be set to 0. */ +} __packed; + +/* CTIO Type CRC_x Status IOCB */ +struct ctio_crc_from_fw { +	uint8_t entry_type;		/* Entry type. */ +	uint8_t entry_count;		/* Entry count. */ +	uint8_t sys_define;		/* System defined. */ +	uint8_t entry_status;		/* Entry Status. */ + +	uint32_t handle;		/* System handle. */ +	uint16_t status; +	uint16_t timeout;		/* Command timeout. */ +	uint16_t dseg_count;		/* Data segment count. */ +	uint32_t reserved1; +	uint16_t state_flags; +#define CTIO_CRC_SF_DIF_CHOPPED BIT_4 + +	uint32_t exchange_address;	/* rcv exchange address */ +	uint16_t reserved2; +	uint16_t flags; +	uint32_t resid_xfer_length; +	uint16_t ox_id; +	uint8_t  reserved3[12]; +	uint16_t runt_guard;		/* reported runt blk guard */ +	uint8_t  actual_dif[8]; +	uint8_t  expected_dif[8]; +} __packed; + +/*   * ISP queue - ABTS received/response entries structure definition for 24xx.   
*/  #define ABTS_RECV_24XX		0x54 /* ABTS received (for 24xx) */ @@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl {  	int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,  			unsigned char *, uint32_t, int, int, int);  	void (*handle_data)(struct qla_tgt_cmd *); +	void (*handle_dif_err)(struct qla_tgt_cmd *);  	int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,  			uint32_t);  	void (*free_cmd)(struct qla_tgt_cmd *); @@ -805,6 +870,12 @@ struct qla_tgt {  	struct list_head tgt_list_entry;  }; +struct qla_tgt_sess_op { +	struct scsi_qla_host *vha; +	struct atio_from_isp atio; +	struct work_struct work; +}; +  /*   * Equivilant to IT Nexus (Initiator-Target)   */ @@ -829,9 +900,9 @@ struct qla_tgt_sess {  };  struct qla_tgt_cmd { +	struct se_cmd se_cmd;  	struct qla_tgt_sess *sess;  	int state; -	struct se_cmd se_cmd;  	struct work_struct free_work;  	struct work_struct work;  	/* Sense buffer that will be mapped into outgoing status */ @@ -843,6 +914,7 @@ struct qla_tgt_cmd {  	unsigned int free_sg:1;  	unsigned int aborted:1; /* Needed in case of SRR */  	unsigned int write_data_transferred:1; +	unsigned int ctx_dsd_alloced:1;  	struct scatterlist *sg;	/* cmd data buffer SG vector */  	int sg_cnt;		/* SG segments count */ @@ -855,9 +927,14 @@ struct qla_tgt_cmd {  	uint16_t loop_id;	/* to save extra sess dereferences */  	struct qla_tgt *tgt;	/* to save extra sess dereferences */  	struct scsi_qla_host *vha; -	struct list_head cmd_list;  	struct atio_from_isp atio; +	/* t10dif */ +	struct scatterlist *prot_sg; +	uint32_t prot_sg_cnt; +	uint32_t blk_sz; +	struct crc_context *ctx; +  };  struct qla_tgt_sess_work_param { @@ -902,6 +979,10 @@ struct qla_tgt_prm {  	int sense_buffer_len;  	int residual;  	int add_status_pkt; +	/* dif */ +	struct scatterlist *prot_sg; +	uint16_t prot_seg_cnt; +	uint16_t tot_dsds;  };  struct qla_tgt_srr_imm { @@ -932,8 +1013,8 @@ void qlt_disable_vha(struct scsi_qla_host *);   */  extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);  extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *); -extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64, -			int (*callback)(struct scsi_qla_host *), void *); +extern int qlt_lport_register(void *, u64, u64, u64, +			int (*callback)(struct scsi_qla_host *, void *, u64, u64));  extern void qlt_lport_deregister(struct scsi_qla_host *);  extern void qlt_unreg_sess(struct qla_tgt_sess *);  extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); @@ -977,6 +1058,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,  extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);  extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);  extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); +extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *); +extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);  extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);  extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);  extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); @@ -1002,7 +1085,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *,  extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);  extern int qlt_mem_alloc(struct qla_hw_data *);  extern void qlt_mem_free(struct qla_hw_data *); -extern void qlt_stop_phase1(struct qla_tgt *); +extern int qlt_stop_phase1(struct qla_tgt *);  extern void qlt_stop_phase2(struct qla_tgt *);  extern irqreturn_t qla83xx_msix_atio_q(int, void *);  
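/*
 * A minimal sketch, not part of this patch, of how a fabric module might
 * drive the reworked qlt_lport_register() declared above: the opaque lport
 * cookie is handed back to the registration callback, and the NPIV
 * WWPN/WWNN pair is zero when registering a plain physical port (compare
 * tcm_qla2xxx_make_lport() later in this series). Every name below except
 * qlt_lport_register() and the qla2xxx types is hypothetical.
 */
#if 0	/* illustrative only */
static int example_lport_cb(struct scsi_qla_host *vha, void *target_lport_ptr,
			    u64 npiv_wwpn, u64 npiv_wwnn)
{
	/* Stash vha in the caller's lport structure before returning. */
	return 0;
}

static int example_register_physical_port(void *lport, u64 phys_wwpn)
{
	/* Zero NPIV WWNs select plain physical-port registration. */
	return qlt_lport_register(lport, phys_wwpn, 0, 0, example_lport_cb);
}
#endif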
extern void qlt_83xx_iospace_config(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c new file mode 100644 index 00000000000..cb9a0c4bc41 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -0,0 +1,956 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c)  2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ +#include "qla_def.h" +#include "qla_tmpl.h" + +/* note default template is in big endian */ +static const uint32_t ql27xx_fwdt_default_template[] = { +	0x63000000, 0xa4000000, 0x7c050000, 0x00000000, +	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x00000000, 0x00000000, 0x00000000, +	0x00000000, 0x04010000, 0x14000000, 0x00000000, +	0x02000000, 0x44000000, 0x09010000, 0x10000000, +	0x00000000, 0x02000000, 0x01010000, 0x1c000000, +	0x00000000, 0x02000000, 0x00600000, 0x00000000, +	0xc0000000, 0x01010000, 0x1c000000, 0x00000000, +	0x02000000, 0x00600000, 0x00000000, 0xcc000000, +	0x01010000, 0x1c000000, 0x00000000, 0x02000000, +	0x10600000, 0x00000000, 0xd4000000, 0x01010000, +	0x1c000000, 0x00000000, 0x02000000, 0x700f0000, +	0x00000060, 0xf0000000, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x00700000, 0x041000c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x10700000, 0x041000c0, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x40700000, 0x041000c0, +	0x01010000, 0x1c000000, 0x00000000, 0x02000000, +	0x007c0000, 0x01000000, 0xc0000000, 0x00010000, +	0x18000000, 0x00000000, 0x02000000, 0x007c0000, +	0x040300c4, 0x00010000, 0x18000000, 0x00000000, +	0x02000000, 0x007c0000, 0x040100c0, 0x01010000, +	0x1c000000, 0x00000000, 0x02000000, 0x007c0000, +	0x00000000, 0xc0000000, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x007c0000, 0x04200000, +	0x0b010000, 0x18000000, 0x00000000, 0x02000000, +	0x0c000000, 0x00000000, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000000b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000010b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000020b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000030b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000040b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000050b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000060b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000070b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000080b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x000090b0, 0x02010000, 0x20000000, +	0x00000000, 0x02000000, 0x700f0000, 0x040100fc, +	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x0a000000, 0x040100c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x0a000000, 0x04200080, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 
0x00be0000, 0x041000c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x10be0000, 0x041000c0, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x20be0000, 0x041000c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x30be0000, 0x041000c0, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x00b00000, 0x041000c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x10b00000, 0x041000c0, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x20b00000, 0x041000c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x30b00000, 0x041000c0, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x00300000, 0x041000c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x10300000, 0x041000c0, 0x00010000, 0x18000000, +	0x00000000, 0x02000000, 0x20300000, 0x041000c0, +	0x00010000, 0x18000000, 0x00000000, 0x02000000, +	0x30300000, 0x041000c0, 0x0a010000, 0x10000000, +	0x00000000, 0x02000000, 0x06010000, 0x1c000000, +	0x00000000, 0x02000000, 0x01000000, 0x00000200, +	0xff230200, 0x06010000, 0x1c000000, 0x00000000, +	0x02000000, 0x02000000, 0x00001000, 0x00000000, +	0x07010000, 0x18000000, 0x00000000, 0x02000000, +	0x00000000, 0x01000000, 0x07010000, 0x18000000, +	0x00000000, 0x02000000, 0x00000000, 0x02000000, +	0x07010000, 0x18000000, 0x00000000, 0x02000000, +	0x00000000, 0x03000000, 0x0d010000, 0x14000000, +	0x00000000, 0x02000000, 0x00000000, 0xff000000, +	0x10000000, 0x00000000, 0x00000080, +}; + +static inline void __iomem * +qla27xx_isp_reg(struct scsi_qla_host *vha) +{ +	return &vha->hw->iobase->isp24; +} + +static inline void +qla27xx_insert16(uint16_t value, void *buf, ulong *len) +{ +	if (buf) { +		buf += *len; +		*(__le16 *)buf = cpu_to_le16(value); +	} +	*len += sizeof(value); +} + +static inline void +qla27xx_insert32(uint32_t value, void *buf, ulong *len) +{ +	if (buf) { +		buf += *len; +		*(__le32 *)buf = cpu_to_le32(value); +	} +	*len += sizeof(value); +} + +static inline void +qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) +{ +	ulong cnt = size; + +	if (buf && mem) { +		buf += *len; +		while (cnt >= sizeof(uint32_t)) { +			*(__le32 *)buf = cpu_to_le32p(mem); +			buf += sizeof(uint32_t); +			mem += sizeof(uint32_t); +			cnt -= sizeof(uint32_t); +		} +		if (cnt) +			memcpy(buf, mem, cnt); +	} +	*len += size; +} + +static inline void +qla27xx_read8(void *window, void *buf, ulong *len) +{ +	uint8_t value = ~0; + +	if (buf) { +		value = RD_REG_BYTE((__iomem void *)window); +		ql_dbg(ql_dbg_misc, NULL, 0xd011, +		    "%s: -> %x\n", __func__, value); +	} +	qla27xx_insert32(value, buf, len); +} + +static inline void +qla27xx_read16(void *window, void *buf, ulong *len) +{ +	uint16_t value = ~0; + +	if (buf) { +		value = RD_REG_WORD((__iomem void *)window); +		ql_dbg(ql_dbg_misc, NULL, 0xd012, +		    "%s: -> %x\n", __func__, value); +	} +	qla27xx_insert32(value, buf, len); +} + +static inline void +qla27xx_read32(void *window, void *buf, ulong *len) +{ +	uint32_t value = ~0; + +	if (buf) { +		value = RD_REG_DWORD((__iomem void *)window); +		ql_dbg(ql_dbg_misc, NULL, 0xd013, +		    "%s: -> %x\n", __func__, value); +	} +	qla27xx_insert32(value, buf, len); +} + +static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *) +{ +	return +	    (width == 1) ? qla27xx_read8 : +	    (width == 2) ? 
qla27xx_read16 : +			   qla27xx_read32; +} + +static inline void +qla27xx_read_reg(__iomem struct device_reg_24xx *reg, +	uint offset, void *buf, ulong *len) +{ +	void *window = (void *)reg + offset; + +	if (buf) { +		ql_dbg(ql_dbg_misc, NULL, 0xd014, +		    "%s: @%x\n", __func__, offset); +	} +	qla27xx_read32(window, buf, len); +} + +static inline void +qla27xx_write_reg(__iomem struct device_reg_24xx *reg, +	uint offset, uint32_t data, void *buf) +{ +	__iomem void *window = (__iomem void *)reg + offset; + +	if (buf) { +		ql_dbg(ql_dbg_misc, NULL, 0xd015, +		    "%s: @%x <- %x\n", __func__, offset, data); +		WRT_REG_DWORD(window, data); +	} +} + +static inline void +qla27xx_read_window(__iomem struct device_reg_24xx *reg, +	uint32_t addr, uint offset, uint count, uint width, void *buf, +	ulong *len) +{ +	void *window = (void *)reg + offset; +	void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width); + +	if (buf) { +		ql_dbg(ql_dbg_misc, NULL, 0xd016, +		    "%s: base=%x offset=%x count=%x width=%x\n", +		    __func__, addr, offset, count, width); +	} +	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); +	while (count--) { +		qla27xx_insert32(addr, buf, len); +		readn(window, buf, len); +		window += width; +		addr++; +	} +} + +static inline void +qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) +{ +	if (buf) +		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY; +} + +static int +qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ql_dbg(ql_dbg_misc, vha, 0xd100, +	    "%s: nop [%lx]\n", __func__, *len); +	qla27xx_skip_entry(ent, buf); + +	return false; +} + +static int +qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ql_dbg(ql_dbg_misc, vha, 0xd1ff, +	    "%s: end [%lx]\n", __func__, *len); +	qla27xx_skip_entry(ent, buf); + +	/* terminate */ +	return true; +} + +static int +qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd200, +	    "%s: rdio t1 [%lx]\n", __func__, *len); +	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset, +	    ent->t256.reg_count, ent->t256.reg_width, buf, len); + +	return false; +} + +static int +qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd201, +	    "%s: wrio t1 [%lx]\n", __func__, *len); +	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf); +	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf); + +	return false; +} + +static int +qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd202, +	    "%s: rdio t2 [%lx]\n", __func__, *len); +	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf); +	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset, +	    ent->t258.reg_count, ent->t258.reg_width, buf, len); + +	return false; +} + +static int +qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd203, +	    "%s: wrio t2 [%lx]\n", __func__, *len); +
qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf); +	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf); +	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf); + +	return false; +} + +static int +qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd204, +	    "%s: rdpci [%lx]\n", __func__, *len); +	qla27xx_insert32(ent->t260.pci_offset, buf, len); +	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len); + +	return false; +} + +static int +qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd205, +	    "%s: wrpci [%lx]\n", __func__, *len); +	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf); + +	return false; +} + +static int +qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ulong dwords; +	ulong start; +	ulong end; + +	ql_dbg(ql_dbg_misc, vha, 0xd206, +	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len); +	start = ent->t262.start_addr; +	end = ent->t262.end_addr; + +	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) { +		; +	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) { +		end = vha->hw->fw_memory_size; +		if (buf) +			ent->t262.end_addr = end; +	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) { +		start = vha->hw->fw_shared_ram_start; +		end = vha->hw->fw_shared_ram_end; +		if (buf) { +			ent->t262.start_addr = start; +			ent->t262.end_addr = end; +		} +	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) { +		ql_dbg(ql_dbg_misc, vha, 0xd021, +		    "%s: unsupported ddr ram\n", __func__); +		qla27xx_skip_entry(ent, buf); +		goto done; +	} else { +		ql_dbg(ql_dbg_misc, vha, 0xd022, +		    "%s: unknown area %u\n", __func__, ent->t262.ram_area); +		qla27xx_skip_entry(ent, buf); +		goto done; +	} + +	if (end < start || end == 0) { +		ql_dbg(ql_dbg_misc, vha, 0xd023, +		    "%s: unusable range (start=%x end=%x)\n", __func__, +		    ent->t262.start_addr, ent->t262.end_addr); +		qla27xx_skip_entry(ent, buf); +		goto done; +	} + +	dwords = end - start + 1; +	if (buf) { +		ql_dbg(ql_dbg_misc, vha, 0xd024, +		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); +		buf += *len; +		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); +	} +	*len += dwords * sizeof(uint32_t); +done: +	return false; +} + +static int +qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	uint count = 0; +	uint i; +	uint length; + +	ql_dbg(ql_dbg_misc, vha, 0xd207, +	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len); +	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { +		for (i = 0; i < vha->hw->max_req_queues; i++) { +			struct req_que *req = vha->hw->req_q_map[i]; +			if (req || !buf) { +				length = req ? +				    req->length : REQUEST_ENTRY_CNT_24XX; +				qla27xx_insert16(i, buf, len); +				qla27xx_insert16(length, buf, len); +				qla27xx_insertbuf(req ? req->ring : NULL, +				    length * sizeof(*req->ring), buf, len); +				count++; +			} +		} +	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { +		for (i = 0; i < vha->hw->max_rsp_queues; i++) { +			struct rsp_que *rsp = vha->hw->rsp_q_map[i]; +			if (rsp || !buf) { +				length = rsp ?
+				    rsp->length : RESPONSE_ENTRY_CNT_MQ; +				qla27xx_insert16(i, buf, len); +				qla27xx_insert16(length, buf, len); +				qla27xx_insertbuf(rsp ? rsp->ring : NULL, +				    length * sizeof(*rsp->ring), buf, len); +				count++; +			} +		} +	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) { +		ql_dbg(ql_dbg_misc, vha, 0xd025, +		    "%s: unsupported atio queue\n", __func__); +		qla27xx_skip_entry(ent, buf); +	} else { +		ql_dbg(ql_dbg_misc, vha, 0xd026, +		    "%s: unknown queue %u\n", __func__, ent->t263.queue_type); +		qla27xx_skip_entry(ent, buf); +	} + +	if (buf) +		ent->t263.num_queues = count; + +	return false; +} + +static int +qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ql_dbg(ql_dbg_misc, vha, 0xd208, +	    "%s: getfce [%lx]\n", __func__, *len); +	if (vha->hw->fce) { +		if (buf) { +			ent->t264.fce_trace_size = FCE_SIZE; +			ent->t264.write_pointer = vha->hw->fce_wr; +			ent->t264.base_pointer = vha->hw->fce_dma; +			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0]; +			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2]; +			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3]; +			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4]; +			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5]; +			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6]; +		} +		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len); +	} else { +		ql_dbg(ql_dbg_misc, vha, 0xd027, +		    "%s: missing fce\n", __func__); +		qla27xx_skip_entry(ent, buf); +	} + +	return false; +} + +static int +qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd209, +	    "%s: pause risc [%lx]\n", __func__, *len); +	if (buf) +		qla24xx_pause_risc(reg, vha->hw); + +	return false; +} + +static int +qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ql_dbg(ql_dbg_misc, vha, 0xd20a, +	    "%s: reset risc [%lx]\n", __func__, *len); +	if (buf) +		qla24xx_soft_reset(vha->hw); + +	return false; +} + +static int +qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + +	ql_dbg(ql_dbg_misc, vha, 0xd20b, +	    "%s: dis intr [%lx]\n", __func__, *len); +	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf); + +	return false; +} + +static int +qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ql_dbg(ql_dbg_misc, vha, 0xd20c, +	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len); +	if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) { +		if (vha->hw->eft) { +			if (buf) { +				ent->t268.buf_size = EFT_SIZE; +				ent->t268.start_addr = vha->hw->eft_dma; +			} +			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len); +		} else { +			ql_dbg(ql_dbg_misc, vha, 0xd028, +			    "%s: missing eft\n", __func__); +			qla27xx_skip_entry(ent, buf); +		} +	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) { +		ql_dbg(ql_dbg_misc, vha, 0xd029, +		    "%s: unsupported exchange offload buffer\n", __func__); +		qla27xx_skip_entry(ent, buf); +	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) { +		ql_dbg(ql_dbg_misc, vha, 0xd02a, +		    "%s: unsupported extended login buffer\n", __func__); +		qla27xx_skip_entry(ent, buf); +	} else { +		ql_dbg(ql_dbg_misc, vha, 0xd02b, +		    "%s: 
unknown buf %x\n", __func__, ent->t268.buf_type); +		qla27xx_skip_entry(ent, buf); +	} + +	return false; +} + +static int +qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ql_dbg(ql_dbg_misc, vha, 0xd20d, +	    "%s: scratch [%lx]\n", __func__, *len); +	qla27xx_insert32(0xaaaaaaaa, buf, len); +	qla27xx_insert32(0xbbbbbbbb, buf, len); +	qla27xx_insert32(0xcccccccc, buf, len); +	qla27xx_insert32(0xdddddddd, buf, len); +	qla27xx_insert32(*len + sizeof(uint32_t), buf, len); +	if (buf) +		ent->t269.scratch_size = 5 * sizeof(uint32_t); + +	return false; +} + +static int +qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); +	ulong dwords = ent->t270.count; +	ulong addr = ent->t270.addr; + +	ql_dbg(ql_dbg_misc, vha, 0xd20e, +	    "%s: rdremreg [%lx]\n", __func__, *len); +	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); +	while (dwords--) { +		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf); +		qla27xx_insert32(addr, buf, len); +		qla27xx_read_reg(reg, 0xc4, buf, len); +		addr += sizeof(uint32_t); +	} + +	return false; +} + +static int +qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); +	ulong addr = ent->t271.addr; +	ulong data = ent->t271.data; + +	ql_dbg(ql_dbg_misc, vha, 0xd20f, +	    "%s: wrremreg [%lx]\n", __func__, *len); +	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); +	qla27xx_write_reg(reg, 0xc4, data, buf); +	qla27xx_write_reg(reg, 0xc0, addr, buf); + +	return false; +} + +static int +qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ulong dwords = ent->t272.count; +	ulong start = ent->t272.addr; + +	ql_dbg(ql_dbg_misc, vha, 0xd210, +	    "%s: rdremram [%lx]\n", __func__, *len); +	if (buf) { +		ql_dbg(ql_dbg_misc, vha, 0xd02c, +		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords); +		buf += *len; +		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf); +	} +	*len += dwords * sizeof(uint32_t); + +	return false; +} + +static int +qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ulong dwords = ent->t273.count; +	ulong addr = ent->t273.addr; +	uint32_t value; + +	ql_dbg(ql_dbg_misc, vha, 0xd211, +	    "%s: pcicfg [%lx]\n", __func__, *len); +	while (dwords--) { +		value = ~0; +		if (pci_read_config_dword(vha->hw->pdev, addr, &value)) +			ql_dbg(ql_dbg_misc, vha, 0xd02d, +			    "%s: failed pcicfg read at %lx\n", __func__, addr); +		qla27xx_insert32(addr, buf, len); +		qla27xx_insert32(value, buf, len); +		addr += sizeof(uint32_t); +	} + +	return false; +} + +static int +qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	uint count = 0; +	uint i; + +	ql_dbg(ql_dbg_misc, vha, 0xd212, +	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len); +	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { +		for (i = 0; i < vha->hw->max_req_queues; i++) { +			struct req_que *req = vha->hw->req_q_map[i]; +			if (req || !buf) { +				qla27xx_insert16(i, buf, len); +				qla27xx_insert16(1, buf, len); +				qla27xx_insert32(req && req->out_ptr ? 
+				    *req->out_ptr : 0, buf, len); +				count++; +			} +		} +	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { +		for (i = 0; i < vha->hw->max_rsp_queues; i++) { +			struct rsp_que *rsp = vha->hw->rsp_q_map[i]; +			if (rsp || !buf) { +				qla27xx_insert16(i, buf, len); +				qla27xx_insert16(1, buf, len); +				qla27xx_insert32(rsp && rsp->in_ptr ? +				    *rsp->in_ptr : 0, buf, len); +				count++; +			} +		} +	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) { +		ql_dbg(ql_dbg_misc, vha, 0xd02e, +		    "%s: unsupported atio queue\n", __func__); +		qla27xx_skip_entry(ent, buf); +	} else { +		ql_dbg(ql_dbg_misc, vha, 0xd02f, +		    "%s: unknown queue %u\n", __func__, ent->t274.queue_type); +		qla27xx_skip_entry(ent, buf); +	} + +	if (buf) +		ent->t274.num_queues = count; + +	if (!count) +		qla27xx_skip_entry(ent, buf); + +	return false; +} + +static int +qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) +{ +	ql_dbg(ql_dbg_misc, vha, 0xd2ff, +	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len); +	qla27xx_skip_entry(ent, buf); + +	return false; +} + +struct qla27xx_fwdt_entry_call { +	int type; +	int (*call)( +	    struct scsi_qla_host *, +	    struct qla27xx_fwdt_entry *, +	    void *, +	    ulong *); +}; + +static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = { +	{ ENTRY_TYPE_NOP		, qla27xx_fwdt_entry_t0    } , +	{ ENTRY_TYPE_TMP_END		, qla27xx_fwdt_entry_t255  } , +	{ ENTRY_TYPE_RD_IOB_T1		, qla27xx_fwdt_entry_t256  } , +	{ ENTRY_TYPE_WR_IOB_T1		, qla27xx_fwdt_entry_t257  } , +	{ ENTRY_TYPE_RD_IOB_T2		, qla27xx_fwdt_entry_t258  } , +	{ ENTRY_TYPE_WR_IOB_T2		, qla27xx_fwdt_entry_t259  } , +	{ ENTRY_TYPE_RD_PCI		, qla27xx_fwdt_entry_t260  } , +	{ ENTRY_TYPE_WR_PCI		, qla27xx_fwdt_entry_t261  } , +	{ ENTRY_TYPE_RD_RAM		, qla27xx_fwdt_entry_t262  } , +	{ ENTRY_TYPE_GET_QUEUE		, qla27xx_fwdt_entry_t263  } , +	{ ENTRY_TYPE_GET_FCE		, qla27xx_fwdt_entry_t264  } , +	{ ENTRY_TYPE_PSE_RISC		, qla27xx_fwdt_entry_t265  } , +	{ ENTRY_TYPE_RST_RISC		, qla27xx_fwdt_entry_t266  } , +	{ ENTRY_TYPE_DIS_INTR		, qla27xx_fwdt_entry_t267  } , +	{ ENTRY_TYPE_GET_HBUF		, qla27xx_fwdt_entry_t268  } , +	{ ENTRY_TYPE_SCRATCH		, qla27xx_fwdt_entry_t269  } , +	{ ENTRY_TYPE_RDREMREG		, qla27xx_fwdt_entry_t270  } , +	{ ENTRY_TYPE_WRREMREG		, qla27xx_fwdt_entry_t271  } , +	{ ENTRY_TYPE_RDREMRAM		, qla27xx_fwdt_entry_t272  } , +	{ ENTRY_TYPE_PCICFG		, qla27xx_fwdt_entry_t273  } , +	{ ENTRY_TYPE_GET_SHADOW		, qla27xx_fwdt_entry_t274  } , +	{ -1				, qla27xx_fwdt_entry_other } +}; + +static inline int (*qla27xx_find_entry(int type)) +	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *) +{ +	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list; + +	while (list->type != -1 && list->type != type) +		list++; + +	return list->call; +} + +static inline void * +qla27xx_next_entry(void *p) +{ +	struct qla27xx_fwdt_entry *ent = p; + +	return p + ent->hdr.entry_size; +} + +static void +qla27xx_walk_template(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len) +{ +	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset; +	ulong count = tmp->entry_count; + +	ql_dbg(ql_dbg_misc, vha, 0xd01a, +	    "%s: entry count %lx\n", __func__, count); +	while (count--) { +		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len)) +			break; +		ent = qla27xx_next_entry(ent); +	} +	ql_dbg(ql_dbg_misc, vha, 0xd01b, +	    "%s: len=%lx\n", __func__, 
*len); +} + +static void +qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp) +{ +	tmp->capture_timestamp = jiffies; +} + +static void +qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) +{ +	uint8_t v[] = { 0, 0, 0, 0, 0, 0 }; +	int rval = 0; + +	rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", +	    v+0, v+1, v+2, v+3, v+4, v+5); + +	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]; +	tmp->driver_info[1] = v[5] << 8 | v[4]; +	tmp->driver_info[2] = 0x12345678; +} + +static void +qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp, +	struct scsi_qla_host *vha) +{ +	tmp->firmware_version[0] = vha->hw->fw_major_version; +	tmp->firmware_version[1] = vha->hw->fw_minor_version; +	tmp->firmware_version[2] = vha->hw->fw_subminor_version; +	tmp->firmware_version[3] = +	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes; +	tmp->firmware_version[4] = +	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]; +} + +static void +ql27xx_edit_template(struct scsi_qla_host *vha, +	struct qla27xx_fwdt_template *tmp) +{ +	qla27xx_time_stamp(tmp); +	qla27xx_driver_info(tmp); +	qla27xx_firmware_info(tmp, vha); +} + +static inline uint32_t +qla27xx_template_checksum(void *p, ulong size) +{ +	uint32_t *buf = p; +	uint64_t sum = 0; + +	size /= sizeof(*buf); + +	while (size--) +		sum += *buf++; + +	sum = (sum & 0xffffffff) + (sum >> 32); + +	return ~sum; +} + +static inline int +qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) +{ +	return qla27xx_template_checksum(tmp, tmp->template_size) == 0; +} + +static inline int +qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp) +{ +	return tmp->template_type == TEMPLATE_TYPE_FWDUMP; +} + +static void +qla27xx_execute_fwdt_template(struct scsi_qla_host *vha) +{ +	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template; +	ulong len; + +	if (qla27xx_fwdt_template_valid(tmp)) { +		len = tmp->template_size; +		tmp = memcpy(vha->hw->fw_dump, tmp, len); +		ql27xx_edit_template(vha, tmp); +		qla27xx_walk_template(vha, tmp, tmp, &len); +		vha->hw->fw_dump_len = len; +		vha->hw->fw_dumped = 1; +	} +} + +ulong +qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha) +{ +	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template; +	ulong len = 0; + +	if (qla27xx_fwdt_template_valid(tmp)) { +		len = tmp->template_size; +		qla27xx_walk_template(vha, tmp, NULL, &len); +	} + +	return len; +} + +ulong +qla27xx_fwdt_template_size(void *p) +{ +	struct qla27xx_fwdt_template *tmp = p; + +	return tmp->template_size; +} + +ulong +qla27xx_fwdt_template_default_size(void) +{ +	return sizeof(ql27xx_fwdt_default_template); +} + +const void * +qla27xx_fwdt_template_default(void) +{ +	return ql27xx_fwdt_default_template; +} + +int +qla27xx_fwdt_template_valid(void *p) +{ +	struct qla27xx_fwdt_template *tmp = p; + +	if (!qla27xx_verify_template_header(tmp)) { +		ql_log(ql_log_warn, NULL, 0xd01c, +		    "%s: template type %x\n", __func__, tmp->template_type); +		return false; +	} + +	if (!qla27xx_verify_template_checksum(tmp)) { +		ql_log(ql_log_warn, NULL, 0xd01d, +		    "%s: failed template checksum\n", __func__); +		return false; +	} + +	return true; +} + +void +qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) +{ +	ulong flags = 0; + +	if (!hardware_locked) +		spin_lock_irqsave(&vha->hw->hardware_lock, flags); + +	if (!vha->hw->fw_dump) +		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n"); +	else if (!vha->hw->fw_dump_template) +		ql_log(ql_log_warn, vha, 0xd01f, 
"fwdump template missing.\n"); +	else +		qla27xx_execute_fwdt_template(vha); + +	if (!hardware_locked) +		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +} diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h new file mode 100644 index 00000000000..1967424c8e6 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -0,0 +1,216 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c)  2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ + +#ifndef __QLA_DMP27_H__ +#define	__QLA_DMP27_H__ + +#define IOBASE_ADDR	offsetof(struct device_reg_24xx, iobase_addr) + +struct __packed qla27xx_fwdt_template { +	uint32_t template_type; +	uint32_t entry_offset; +	uint32_t template_size; +	uint32_t reserved_1; + +	uint32_t entry_count; +	uint32_t template_version; +	uint32_t capture_timestamp; +	uint32_t template_checksum; + +	uint32_t reserved_2; +	uint32_t driver_info[3]; + +	uint32_t saved_state[16]; + +	uint32_t reserved_3[8]; +	uint32_t firmware_version[5]; +}; + +#define TEMPLATE_TYPE_FWDUMP		99 + +#define ENTRY_TYPE_NOP			0 +#define ENTRY_TYPE_TMP_END		255 +#define ENTRY_TYPE_RD_IOB_T1		256 +#define ENTRY_TYPE_WR_IOB_T1		257 +#define ENTRY_TYPE_RD_IOB_T2		258 +#define ENTRY_TYPE_WR_IOB_T2		259 +#define ENTRY_TYPE_RD_PCI		260 +#define ENTRY_TYPE_WR_PCI		261 +#define ENTRY_TYPE_RD_RAM		262 +#define ENTRY_TYPE_GET_QUEUE		263 +#define ENTRY_TYPE_GET_FCE		264 +#define ENTRY_TYPE_PSE_RISC		265 +#define ENTRY_TYPE_RST_RISC		266 +#define ENTRY_TYPE_DIS_INTR		267 +#define ENTRY_TYPE_GET_HBUF		268 +#define ENTRY_TYPE_SCRATCH		269 +#define ENTRY_TYPE_RDREMREG		270 +#define ENTRY_TYPE_WRREMREG		271 +#define ENTRY_TYPE_RDREMRAM		272 +#define ENTRY_TYPE_PCICFG		273 +#define ENTRY_TYPE_GET_SHADOW		274 + +#define CAPTURE_FLAG_PHYS_ONLY		BIT_0 +#define CAPTURE_FLAG_PHYS_VIRT		BIT_1 + +#define DRIVER_FLAG_SKIP_ENTRY		BIT_7 + +struct __packed qla27xx_fwdt_entry { +	struct __packed { +		uint32_t entry_type; +		uint32_t entry_size; +		uint32_t reserved_1; + +		uint8_t  capture_flags; +		uint8_t  reserved_2[2]; +		uint8_t  driver_flags; +	} hdr; +	union __packed { +		struct __packed { +		} t0; + +		struct __packed { +		} t255; + +		struct __packed { +			uint32_t base_addr; +			uint8_t  reg_width; +			uint16_t reg_count; +			uint8_t  pci_offset; +		} t256; + +		struct __packed { +			uint32_t base_addr; +			uint32_t write_data; +			uint8_t  pci_offset; +			uint8_t  reserved[3]; +		} t257; + +		struct __packed { +			uint32_t base_addr; +			uint8_t  reg_width; +			uint16_t reg_count; +			uint8_t  pci_offset; +			uint8_t  banksel_offset; +			uint8_t  reserved[3]; +			uint32_t bank; +		} t258; + +		struct __packed { +			uint32_t base_addr; +			uint32_t write_data; +			uint8_t  reserved[2]; +			uint8_t  pci_offset; +			uint8_t  banksel_offset; +			uint32_t bank; +		} t259; + +		struct __packed { +			uint8_t pci_offset; +			uint8_t reserved[3]; +		} t260; + +		struct __packed { +			uint8_t pci_offset; +			uint8_t reserved[3]; +			uint32_t write_data; +		} t261; + +		struct __packed { +			uint8_t  ram_area; +			uint8_t  reserved[3]; +			uint32_t start_addr; +			uint32_t end_addr; +		} t262; + +		struct __packed { +			uint32_t num_queues; +			uint8_t  queue_type; +			uint8_t  reserved[3]; +		} t263; + +		struct __packed { +			uint32_t fce_trace_size; +			uint64_t write_pointer; +			uint64_t base_pointer; +			uint32_t fce_enable_mb0; +			uint32_t fce_enable_mb2; +			uint32_t fce_enable_mb3; +			uint32_t fce_enable_mb4; +			uint32_t fce_enable_mb5; 
+			uint32_t fce_enable_mb6; +		} t264; + +		struct __packed { +		} t265; + +		struct __packed { +		} t266; + +		struct __packed { +			uint8_t  pci_offset; +			uint8_t  reserved[3]; +			uint32_t data; +		} t267; + +		struct __packed { +			uint8_t  buf_type; +			uint8_t  reserved[3]; +			uint32_t buf_size; +			uint64_t start_addr; +		} t268; + +		struct __packed { +			uint32_t scratch_size; +		} t269; + +		struct __packed { +			uint32_t addr; +			uint32_t count; +		} t270; + +		struct __packed { +			uint32_t addr; +			uint32_t data; +		} t271; + +		struct __packed { +			uint32_t addr; +			uint32_t count; +		} t272; + +		struct __packed { +			uint32_t addr; +			uint32_t count; +		} t273; + +		struct __packed { +			uint32_t num_queues; +			uint8_t  queue_type; +			uint8_t  reserved[3]; +		} t274; +	}; +}; + +#define T262_RAM_AREA_CRITICAL_RAM	1 +#define T262_RAM_AREA_EXTERNAL_RAM	2 +#define T262_RAM_AREA_SHARED_RAM	3 +#define T262_RAM_AREA_DDR_RAM		4 + +#define T263_QUEUE_TYPE_REQ		1 +#define T263_QUEUE_TYPE_RSP		2 +#define T263_QUEUE_TYPE_ATIO		3 + +#define T268_BUF_TYPE_EXTD_TRACE	1 +#define T268_BUF_TYPE_EXCH_BUFOFF	2 +#define T268_BUF_TYPE_EXTD_LOGIN	3 + +#define T274_QUEUE_TYPE_REQ_SHAD	1 +#define T274_QUEUE_TYPE_RSP_SHAD	2 +#define T274_QUEUE_TYPE_ATIO_SHAD	3 + +#endif diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index a808e293dae..4d2c98cbec4 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -1,15 +1,15 @@  /*   * QLogic Fibre Channel HBA Driver - * Copyright (c)  2003-2013 QLogic Corporation + * Copyright (c)  2003-2014 QLogic Corporation   *   * See LICENSE.qla2xxx for copyright and licensing details.   */  /*   * Driver version   */ -#define QLA2XXX_VERSION      "8.06.00.08-k" +#define QLA2XXX_VERSION      "8.07.00.08-k"  #define QLA_DRIVER_MAJOR_VER	8 -#define QLA_DRIVER_MINOR_VER	6 +#define QLA_DRIVER_MINOR_VER	7  #define QLA_DRIVER_PATCH_VER	0  #define QLA_DRIVER_BETA_VER	0 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index f85b9e5c1f0..e2beab96209 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -53,16 +53,6 @@  struct workqueue_struct *tcm_qla2xxx_free_wq;  struct workqueue_struct *tcm_qla2xxx_cmd_wq; -static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg) -{ -	return 1; -} - -static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg) -{ -	return 0; -} -  /*   * Parse WWN.   
* If strict, we require lower-case hex and colon separators to be sure @@ -174,7 +164,7 @@ static int tcm_qla2xxx_npiv_parse_wwn(  	*wwnn = 0;  	/* count may include a LF at end of string */ -	if (name[cnt-1] == '\n') +	if (name[cnt-1] == '\n' || name[cnt-1] == 0)  		cnt--;  	/* validate we have enough characters for WWPN */ @@ -192,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(  	return 0;  } -static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len, -					u64 wwpn, u64 wwnn) -{ -	u8 b[8], b2[8]; - -	put_unaligned_be64(wwpn, b); -	put_unaligned_be64(wwnn, b2); -	return snprintf(buf, len, -		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x," -		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", -		b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], -		b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]); -} -  static char *tcm_qla2xxx_npiv_get_fabric_name(void)  {  	return "qla2xxx_npiv"; @@ -237,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)  	return lport->lport_naa_name;  } -static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg) -{ -	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, -				struct tcm_qla2xxx_tpg, se_tpg); -	struct tcm_qla2xxx_lport *lport = tpg->lport; - -	return &lport->lport_npiv_name[0]; -} -  static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)  {  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, @@ -330,7 +297,7 @@ static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,  				struct tcm_qla2xxx_tpg, se_tpg); -	return QLA_TPG_ATTRIB(tpg)->generate_node_acls; +	return tpg->tpg_attrib.generate_node_acls;  }  static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) @@ -338,7 +305,7 @@ static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,  				struct tcm_qla2xxx_tpg, se_tpg); -	return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls; +	return tpg->tpg_attrib.cache_dynamic_acls;  }  static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) @@ -346,7 +313,7 @@ static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,  				struct tcm_qla2xxx_tpg, se_tpg); -	return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect; +	return tpg->tpg_attrib.demo_mode_write_protect;  }  static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) @@ -354,7 +321,7 @@ static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,  				struct tcm_qla2xxx_tpg, se_tpg); -	return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect; +	return tpg->tpg_attrib.prod_mode_write_protect;  }  static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) @@ -362,7 +329,7 @@ static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,  				struct tcm_qla2xxx_tpg, se_tpg); -	return QLA_TPG_ATTRIB(tpg)->demo_mode_login_only; +	return tpg->tpg_attrib.demo_mode_login_only;  }  static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl( @@ -505,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)  	cmd->sg_cnt = se_cmd->t_data_nents;  	cmd->sg = se_cmd->t_data_sg; +	cmd->prot_sg_cnt = se_cmd->t_prot_nents; +	cmd->prot_sg = se_cmd->t_prot_sg; +	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size; +	
se_cmd->pi_err = 0; +  	/*  	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup  	 * the SGL mappings into PCIe memory for incoming FCP WRITE data. @@ -600,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)  			return;  		} -		transport_generic_request_failure(&cmd->se_cmd, -						  TCM_CHECK_CONDITION_ABORT_CMD); +		if (cmd->se_cmd.pi_err) +			transport_generic_request_failure(&cmd->se_cmd, +				cmd->se_cmd.pi_err); +		else +			transport_generic_request_failure(&cmd->se_cmd, +				TCM_CHECK_CONDITION_ABORT_CMD); +  		return;  	} @@ -617,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)  	queue_work(tcm_qla2xxx_free_wq, &cmd->work);  } +static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) +{ +	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + +	/* Take an extra kref to prevent the cmd from being freed too early: +	 * we must wait for the SCSI status/check condition response +	 * generated by transport_generic_request_failure() to finish. +	 */ +	kref_get(&cmd->se_cmd.cmd_kref); +	transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); +} + +/* + * Called from qla_target.c:qlt_do_ctio_completion() + */ +static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) +{ +	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); +	queue_work(tcm_qla2xxx_free_wq, &cmd->work); +} +  /*   * Called from qla_target.c:qlt_issue_task_mgmt()   */ @@ -643,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)  	cmd->sg = se_cmd->t_data_sg;  	cmd->offset = 0; +	cmd->prot_sg_cnt = se_cmd->t_prot_nents; +	cmd->prot_sg = se_cmd->t_prot_sg; +	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size; +	se_cmd->pi_err = 0; +  	/*  	 * Now queue completed DATA_IN the qla2xxx LLD and response ring  	 */ @@ -717,6 +720,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)  	qlt_xmit_tm_rsp(mcmd);  } +static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) +{ +	struct qla_tgt_cmd *cmd = container_of(se_cmd, +				struct qla_tgt_cmd, se_cmd); +	struct scsi_qla_host *vha = cmd->vha; +	struct qla_hw_data *ha = vha->hw; + +	if (!cmd->sg_mapped) +		return; + +	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); +	cmd->sg_mapped = 0; +} +  /* Local pointer to allocated TCM configfs fabric module */  struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;  struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs; @@ -777,6 +794,9 @@ static void tcm_qla2xxx_put_session(struct se_session *se_sess)  static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)  { +	if (!sess) +		return; +  	assert_spin_locked(&sess->vha->hw->hardware_lock);  	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);  } @@ -847,7 +867,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\  			struct tcm_qla2xxx_tpg, se_tpg);		\  									\ -	return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name);	\ +	return sprintf(page, "%u\n", tpg->tpg_attrib.name);	\  }									\  									\  static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\ @@ -948,16 +968,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable(  			atomic_read(&tpg->lport_tpg_enabled));  } +static void tcm_qla2xxx_depend_tpg(struct work_struct *work) +{ +	struct tcm_qla2xxx_tpg *base_tpg = container_of(work, +				struct tcm_qla2xxx_tpg, tpg_base_work); +	struct se_portal_group *se_tpg = &base_tpg->se_tpg; +	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; + +	if
(!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, +				  &se_tpg->tpg_group.cg_item)) { +		atomic_set(&base_tpg->lport_tpg_enabled, 1); +		qlt_enable_vha(base_vha); +	} +	complete(&base_tpg->tpg_base_comp); +} + +static void tcm_qla2xxx_undepend_tpg(struct work_struct *work) +{ +	struct tcm_qla2xxx_tpg *base_tpg = container_of(work, +				struct tcm_qla2xxx_tpg, tpg_base_work); +	struct se_portal_group *se_tpg = &base_tpg->se_tpg; +	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha; + +	if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) { +		atomic_set(&base_tpg->lport_tpg_enabled, 0); +		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, +				       &se_tpg->tpg_group.cg_item); +	} +	complete(&base_tpg->tpg_base_comp); +} +  static ssize_t tcm_qla2xxx_tpg_store_enable(  	struct se_portal_group *se_tpg,  	const char *page,  	size_t count)  { -	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; -	struct tcm_qla2xxx_lport *lport = container_of(se_wwn, -			struct tcm_qla2xxx_lport, lport_wwn); -	struct scsi_qla_host *vha = lport->qla_vha; -	struct qla_hw_data *ha = vha->hw;  	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,  			struct tcm_qla2xxx_tpg, se_tpg);  	unsigned long op; @@ -972,19 +1017,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(  		pr_err("Illegal value for tpg_enable: %lu\n", op);  		return -EINVAL;  	} -  	if (op) { -		atomic_set(&tpg->lport_tpg_enabled, 1); -		qlt_enable_vha(vha); +		if (atomic_read(&tpg->lport_tpg_enabled)) +			return -EEXIST; + +		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);  	} else { -		if (!ha->tgt.qla_tgt) { -			pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n"); -			return -ENODEV; -		} -		atomic_set(&tpg->lport_tpg_enabled, 0); -		qlt_stop_phase1(ha->tgt.qla_tgt); +		if (!atomic_read(&tpg->lport_tpg_enabled)) +			return count; + +		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);  	} +	init_completion(&tpg->tpg_base_comp); +	schedule_work(&tpg->tpg_base_work); +	wait_for_completion(&tpg->tpg_base_comp); +	if (op) { +		if (!atomic_read(&tpg->lport_tpg_enabled)) +			return -ENODEV; +	} else { +		if (atomic_read(&tpg->lport_tpg_enabled)) +			return -EPERM; +	}  	return count;  } @@ -1011,7 +1065,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(  	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)  		return ERR_PTR(-EINVAL); -	if (!lport->qla_npiv_vp && (tpgt != 1)) { +	if ((tpgt != 1)) {  		pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");  		return ERR_PTR(-ENOSYS);  	} @@ -1027,10 +1081,10 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(  	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic  	 * NodeACLs  	 */ -	QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1; -	QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1; -	QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1; -	QLA_TPG_ATTRIB(tpg)->demo_mode_login_only = 1; +	tpg->tpg_attrib.generate_node_acls = 1; +	tpg->tpg_attrib.demo_mode_write_protect = 1; +	tpg->tpg_attrib.cache_dynamic_acls = 1; +	tpg->tpg_attrib.demo_mode_login_only = 1;  	ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,  				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -1038,11 +1092,8 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(  		kfree(tpg);  		return NULL;  	} -	/* -	 * Setup local TPG=1 pointer for non NPIV mode. 
-	 */ -	if (lport->qla_npiv_vp == NULL) -		lport->tpg_1 = tpg; + +	lport->tpg_1 = tpg;  	return &tpg->se_tpg;  } @@ -1053,24 +1104,75 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)  			struct tcm_qla2xxx_tpg, se_tpg);  	struct tcm_qla2xxx_lport *lport = tpg->lport;  	struct scsi_qla_host *vha = lport->qla_vha; -	struct qla_hw_data *ha = vha->hw;  	/*  	 * Call into qla2x_target.c LLD logic to shutdown the active  	 * FC Nexuses and disable target mode operation for this qla_hw_data  	 */ -	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop) -		qlt_stop_phase1(ha->tgt.qla_tgt); +	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop) +		qlt_stop_phase1(vha->vha_tgt.qla_tgt);  	core_tpg_deregister(se_tpg);  	/*  	 * Clear local TPG=1 pointer for non NPIV mode.  	 */ -	if (lport->qla_npiv_vp == NULL) -		lport->tpg_1 = NULL; - +	lport->tpg_1 = NULL;  	kfree(tpg);  } +static ssize_t tcm_qla2xxx_npiv_tpg_show_enable( +	struct se_portal_group *se_tpg, +	char *page) +{ +	return tcm_qla2xxx_tpg_show_enable(se_tpg, page); +} + +static ssize_t tcm_qla2xxx_npiv_tpg_store_enable( +	struct se_portal_group *se_tpg, +	const char *page, +	size_t count) +{ +	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; +	struct tcm_qla2xxx_lport *lport = container_of(se_wwn, +			struct tcm_qla2xxx_lport, lport_wwn); +	struct scsi_qla_host *vha = lport->qla_vha; +	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, +			struct tcm_qla2xxx_tpg, se_tpg); +	unsigned long op; +	int rc; + +	rc = kstrtoul(page, 0, &op); +	if (rc < 0) { +		pr_err("kstrtoul() returned %d\n", rc); +		return -EINVAL; +	} +	if ((op != 1) && (op != 0)) { +		pr_err("Illegal value for tpg_enable: %lu\n", op); +		return -EINVAL; +	} +	if (op) { +		if (atomic_read(&tpg->lport_tpg_enabled)) +			return -EEXIST; + +		atomic_set(&tpg->lport_tpg_enabled, 1); +		qlt_enable_vha(vha); +	} else { +		if (!atomic_read(&tpg->lport_tpg_enabled)) +			return count; + +		atomic_set(&tpg->lport_tpg_enabled, 0); +		qlt_stop_phase1(vha->vha_tgt.qla_tgt); +	} + +	return count; +} + +TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = { +        &tcm_qla2xxx_npiv_tpg_enable.attr, +        NULL, +}; +  static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(  	struct se_wwn *wwn,  	struct config_group *group, @@ -1095,12 +1197,22 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(  	tpg->lport = lport;  	tpg->lport_tpgt = tpgt; +	/* +	 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic +	 * NodeACLs +	 */ +	tpg->tpg_attrib.generate_node_acls = 1; +	tpg->tpg_attrib.demo_mode_write_protect = 1; +	tpg->tpg_attrib.cache_dynamic_acls = 1; +	tpg->tpg_attrib.demo_mode_login_only = 1; +  	ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,  				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);  	if (ret < 0) {  		kfree(tpg);  		return NULL;  	} +	lport->tpg_1 = tpg;  	return &tpg->se_tpg;  } @@ -1111,13 +1223,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(  	scsi_qla_host_t *vha,  	const uint8_t *s_id)  { -	struct qla_hw_data *ha = vha->hw;  	struct tcm_qla2xxx_lport *lport;  	struct se_node_acl *se_nacl;  	struct tcm_qla2xxx_nacl *nacl;  	u32 key; -	lport = ha->tgt.target_lport_ptr; +	lport = vha->vha_tgt.target_lport_ptr;  	if (!lport) {  		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");  		dump_stack(); @@ -1221,13 +1332,12 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(  	scsi_qla_host_t *vha,  	
const uint16_t loop_id)  { -	struct qla_hw_data *ha = vha->hw;  	struct tcm_qla2xxx_lport *lport;  	struct se_node_acl *se_nacl;  	struct tcm_qla2xxx_nacl *nacl;  	struct tcm_qla2xxx_fc_loopid *fc_loopid; -	lport = ha->tgt.target_lport_ptr; +	lport = vha->vha_tgt.target_lport_ptr;  	if (!lport) {  		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");  		dump_stack(); @@ -1341,6 +1451,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)  {  	struct qla_tgt *tgt = sess->tgt;  	struct qla_hw_data *ha = tgt->ha; +	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);  	struct se_session *se_sess;  	struct se_node_acl *se_nacl;  	struct tcm_qla2xxx_lport *lport; @@ -1357,7 +1468,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)  	se_nacl = se_sess->se_node_acl;  	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); -	lport = ha->tgt.target_lport_ptr; +	lport = vha->vha_tgt.target_lport_ptr;  	if (!lport) {  		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");  		dump_stack(); @@ -1390,8 +1501,10 @@ static int tcm_qla2xxx_check_initiator_node_acl(  	struct qla_tgt_sess *sess = qla_tgt_sess;  	unsigned char port_name[36];  	unsigned long flags; +	int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count : +		       TCM_QLA2XXX_DEFAULT_TAGS; -	lport = ha->tgt.target_lport_ptr; +	lport = vha->vha_tgt.target_lport_ptr;  	if (!lport) {  		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");  		dump_stack(); @@ -1407,7 +1520,9 @@ static int tcm_qla2xxx_check_initiator_node_acl(  	}  	se_tpg = &tpg->se_tpg; -	se_sess = transport_init_session(); +	se_sess = transport_init_session_tags(num_tags, +					      sizeof(struct qla_tgt_cmd), +					      TARGET_PROT_NORMAL);  	if (IS_ERR(se_sess)) {  		pr_err("Unable to initialize struct se_session\n");  		return PTR_ERR(se_sess); @@ -1455,7 +1570,8 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,  {  	struct qla_tgt *tgt = sess->tgt;  	struct qla_hw_data *ha = tgt->ha; -	struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr; +	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); +	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;  	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;  	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,  			struct tcm_qla2xxx_nacl, se_node_acl); @@ -1524,6 +1640,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,  static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {  	.handle_cmd		= tcm_qla2xxx_handle_cmd,  	.handle_data		= tcm_qla2xxx_handle_data, +	.handle_dif_err		= tcm_qla2xxx_handle_dif_err,  	.handle_tmr		= tcm_qla2xxx_handle_tmr,  	.free_cmd		= tcm_qla2xxx_free_cmd,  	.free_mcmd		= tcm_qla2xxx_free_mcmd, @@ -1562,15 +1679,18 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)  	return 0;  } -static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha) +static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, +					 void *target_lport_ptr, +					 u64 npiv_wwpn, u64 npiv_wwnn)  {  	struct qla_hw_data *ha = vha->hw; -	struct tcm_qla2xxx_lport *lport; +	struct tcm_qla2xxx_lport *lport = +			(struct tcm_qla2xxx_lport *)target_lport_ptr;  	/* -	 * Setup local pointer to vha, NPIV VP pointer (if present) and -	 * vha->tcm_lport pointer +	 * Setup tgt_ops, local pointer to vha and target_lport_ptr  	 */ -	lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr; +	ha->tgt.tgt_ops = &tcm_qla2xxx_template; +	vha->vha_tgt.target_lport_ptr = 
target_lport_ptr;  	lport->qla_vha = vha;  	return 0; @@ -1602,8 +1722,8 @@ static struct se_wwn *tcm_qla2xxx_make_lport(  	if (ret != 0)  		goto out; -	ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn, -				tcm_qla2xxx_lport_register_cb, lport); +	ret = qlt_lport_register(lport, wwpn, 0, 0, +				 tcm_qla2xxx_lport_register_cb);  	if (ret != 0)  		goto out_lport; @@ -1621,7 +1741,6 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)  	struct tcm_qla2xxx_lport *lport = container_of(wwn,  			struct tcm_qla2xxx_lport, lport_wwn);  	struct scsi_qla_host *vha = lport->qla_vha; -	struct qla_hw_data *ha = vha->hw;  	struct se_node_acl *node;  	u32 key = 0; @@ -1630,8 +1749,8 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)  	 * shutdown of struct qla_tgt after the call to  	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..  	 */ -	if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped) -		qlt_stop_phase2(ha->tgt.qla_tgt); +	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped) +		qlt_stop_phase2(vha->vha_tgt.qla_tgt);  	qlt_lport_deregister(vha); @@ -1642,17 +1761,79 @@ static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)  	kfree(lport);  } +static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, +					      void *target_lport_ptr, +					      u64 npiv_wwpn, u64 npiv_wwnn) +{ +	struct fc_vport *vport; +	struct Scsi_Host *sh = base_vha->host; +	struct scsi_qla_host *npiv_vha; +	struct tcm_qla2xxx_lport *lport = +			(struct tcm_qla2xxx_lport *)target_lport_ptr; +	struct tcm_qla2xxx_lport *base_lport = +			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; +	struct tcm_qla2xxx_tpg *base_tpg; +	struct fc_vport_identifiers vport_id; + +	if (!qla_tgt_mode_enabled(base_vha)) { +		pr_err("qla2xxx base_vha not enabled for target mode\n"); +		return -EPERM; +	} + +	if (!base_lport || !base_lport->tpg_1 || +	    !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) { +		pr_err("qla2xxx base_lport or tpg_1 not available\n"); +		return -EPERM; +	} +	base_tpg = base_lport->tpg_1; + +	memset(&vport_id, 0, sizeof(vport_id)); +	vport_id.port_name = npiv_wwpn; +	vport_id.node_name = npiv_wwnn; +	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; +	vport_id.vport_type = FC_PORTTYPE_NPIV; +	vport_id.disable = false; + +	vport = fc_vport_create(sh, 0, &vport_id); +	if (!vport) { +		pr_err("fc_vport_create failed for qla2xxx_npiv\n"); +		return -ENODEV; +	} +	/* +	 * Setup local pointer to NPIV vhba + target_lport_ptr +	 */ +	npiv_vha = (struct scsi_qla_host *)vport->dd_data; +	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; +	lport->qla_vha = npiv_vha; +	scsi_host_get(npiv_vha->host); +	return 0; +} + +  static struct se_wwn *tcm_qla2xxx_npiv_make_lport(  	struct target_fabric_configfs *tf,  	struct config_group *group,  	const char *name)  {  	struct tcm_qla2xxx_lport *lport; -	u64 npiv_wwpn, npiv_wwnn; +	u64 phys_wwpn, npiv_wwpn, npiv_wwnn; +	char *p, tmp[128];  	int ret; -	if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1, -				&npiv_wwpn, &npiv_wwnn) < 0) +	snprintf(tmp, 128, "%s", name); + +	p = strchr(tmp, '@'); +	if (!p) { +		pr_err("Unable to locate NPIV '@' separator\n"); +		return ERR_PTR(-EINVAL); +	} +	*p++ = '\0'; + +	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0) +		return ERR_PTR(-EINVAL); + +	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1, +				       &npiv_wwpn, &npiv_wwnn) < 0)  		return ERR_PTR(-EINVAL);  	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); @@ -1662,16 +1843,21 @@ static struct se_wwn
*tcm_qla2xxx_npiv_make_lport(  	}  	lport->lport_npiv_wwpn = npiv_wwpn;  	lport->lport_npiv_wwnn = npiv_wwnn; -	tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0], -			TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);  	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); -/* FIXME: tcm_qla2xxx_npiv_make_lport */ -	ret = -ENOSYS; +	ret = tcm_qla2xxx_init_lport(lport);  	if (ret != 0)  		goto out; +	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn, +				 tcm_qla2xxx_lport_register_npiv_cb); +	if (ret != 0) +		goto out_lport; +  	return &lport->lport_wwn; +out_lport: +	vfree(lport->lport_loopid_map); +	btree_destroy32(&lport->lport_fcport_map);  out:  	kfree(lport);  	return ERR_PTR(ret); @@ -1681,14 +1867,16 @@ static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)  {  	struct tcm_qla2xxx_lport *lport = container_of(wwn,  			struct tcm_qla2xxx_lport, lport_wwn); -	struct scsi_qla_host *vha = lport->qla_vha; -	struct Scsi_Host *sh = vha->host; +	struct scsi_qla_host *npiv_vha = lport->qla_vha; +	struct qla_hw_data *ha = npiv_vha->hw; +	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + +	scsi_host_put(npiv_vha->host);  	/* -	 * Notify libfc that we want to release the lport->npiv_vport +	 * Notify libfc that we want to release the vha->fc_vport  	 */ -	fc_vport_terminate(lport->npiv_vport); - -	scsi_host_put(sh); +	fc_vport_terminate(npiv_vha->fc_vport); +	scsi_host_put(base_vha->host);  	kfree(lport);  } @@ -1744,6 +1932,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {  	.queue_data_in			= tcm_qla2xxx_queue_data_in,  	.queue_status			= tcm_qla2xxx_queue_status,  	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp, +	.aborted_task			= tcm_qla2xxx_aborted_task,  	/*  	 * Setup function pointers for generic logic in  	 * target_core_fabric_configfs.c @@ -1763,20 +1952,22 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {  static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {  	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,  	.get_fabric_proto_ident		= tcm_qla2xxx_get_fabric_proto_ident, -	.tpg_get_wwn			= tcm_qla2xxx_npiv_get_fabric_wwn, +	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,  	.tpg_get_tag			= tcm_qla2xxx_get_tag,  	.tpg_get_default_depth		= tcm_qla2xxx_get_default_depth,  	.tpg_get_pr_transport_id	= tcm_qla2xxx_get_pr_transport_id,  	.tpg_get_pr_transport_id_len	= tcm_qla2xxx_get_pr_transport_id_len,  	.tpg_parse_pr_out_transport_id	= tcm_qla2xxx_parse_pr_out_transport_id, -	.tpg_check_demo_mode		= tcm_qla2xxx_check_false, -	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_true, -	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true, -	.tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false, +	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode, +	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache, +	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, +	.tpg_check_prod_mode_write_protect = +	    tcm_qla2xxx_check_prod_write_protect,  	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,  	.tpg_alloc_fabric_acl		= tcm_qla2xxx_alloc_fabric_acl,  	.tpg_release_fabric_acl		= tcm_qla2xxx_release_fabric_acl,  	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index, +	.check_stop_free                = tcm_qla2xxx_check_stop_free,  	.release_cmd			= tcm_qla2xxx_release_cmd,  	.put_session			= tcm_qla2xxx_put_session,  	.shutdown_session		= tcm_qla2xxx_shutdown_session, @@ -1791,6 +1982,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {  	
.queue_data_in			= tcm_qla2xxx_queue_data_in,  	.queue_status			= tcm_qla2xxx_queue_status,  	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp, +	.aborted_task			= tcm_qla2xxx_aborted_task,  	/*  	 * Setup function pointers for generic logic in  	 * target_core_fabric_configfs.c @@ -1830,16 +2022,16 @@ static int tcm_qla2xxx_register_configfs(void)  	/*  	 * Setup default attribute lists for various fabric->tf_cit_tmpl  	 */ -	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = +	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; +	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs; +	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs =  						tcm_qla2xxx_tpg_attrib_attrs; -	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; -	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; +	fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;  	/*  	 * Register the fabric for use within TCM  	 */ @@ -1870,15 +2062,16 @@ static int tcm_qla2xxx_register_configfs(void)  	/*  	 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl  	 */ -	TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; -	TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; +	npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = +	    tcm_qla2xxx_npiv_tpg_attrs; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL; +	npiv_fabric->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;  	/*  	 * Register the npiv_fabric for use within TCM  	 */ diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h index 329327528a5..10c00214564 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h @@ -4,8 +4,11 @@  #define TCM_QLA2XXX_VERSION	"v0.1"  /* length of ASCII WWPNs including pad */  #define TCM_QLA2XXX_NAMELEN	32 -/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */ -#define TCM_QLA2XXX_NPIV_NAMELEN 66 +/* + * Number of pre-allocated per-session tags, based upon the 
worst-case + * per port number of iocbs + */ +#define TCM_QLA2XXX_DEFAULT_TAGS 2088  #include "qla_target.h" @@ -43,10 +46,11 @@ struct tcm_qla2xxx_tpg {  	struct tcm_qla2xxx_tpg_attrib tpg_attrib;  	/* Returned by tcm_qla2xxx_make_tpg() */  	struct se_portal_group se_tpg; +	/* Items for dealing with configfs_depend_item */ +	struct completion tpg_base_comp; +	struct work_struct tpg_base_work;  }; -#define QLA_TPG_ATTRIB(tpg)	(&(tpg)->tpg_attrib) -  struct tcm_qla2xxx_fc_loopid {  	struct se_node_acl *se_nacl;  }; @@ -64,20 +68,14 @@ struct tcm_qla2xxx_lport {  	char lport_name[TCM_QLA2XXX_NAMELEN];  	/* ASCII formatted naa WWPN for VPD page 83 etc */  	char lport_naa_name[TCM_QLA2XXX_NAMELEN]; -	/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */ -	char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];  	/* map for fc_port pointers in 24-bit FC Port ID space */  	struct btree_head32 lport_fcport_map;  	/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */  	struct tcm_qla2xxx_fc_loopid *lport_loopid_map;  	/* Pointer to struct scsi_qla_host from qla2xxx LLD */  	struct scsi_qla_host *qla_vha; -	/* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */ -	struct scsi_qla_host *qla_npiv_vp;  	/* Pointer to struct qla_tgt pointer */  	struct qla_tgt lport_qla_tgt; -	/* Pointer to struct fc_vport for NPIV vport from libfc */ -	struct fc_vport *npiv_vport;  	/* Pointer to TPG=1 for non NPIV mode */  	struct tcm_qla2xxx_tpg *tpg_1;  	/* Returned by tcm_qla2xxx_make_lport() */ diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c index 8196c2f7915..556c1525f88 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.c +++ b/drivers/scsi/qla4xxx/ql4_83xx.c @@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)  	qla4_83xx_flash_unlock(ha);  } -/** - * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory - * @ha: Pointer to adapter structure - * @addr: Flash address to write to - * @data: Data to be written - * @count: word_count to be written - * - * Return: On success return QLA_SUCCESS - *	   On error return QLA_ERROR - **/ -int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, -				uint32_t *data, uint32_t count) -{ -	int i, j; -	uint32_t agt_ctrl; -	unsigned long flags; -	int ret_val = QLA_SUCCESS; - -	/* Only 128-bit aligned access */ -	if (addr & 0xF) { -		ret_val = QLA_ERROR; -		goto exit_ms_mem_write; -	} - -	write_lock_irqsave(&ha->hw_lock, flags); - -	/* Write address */ -	ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); -	if (ret_val == QLA_ERROR) { -		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", -			   __func__); -		goto exit_ms_mem_write_unlock; -	} - -	for (i = 0; i < count; i++, addr += 16) { -		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, -					     QLA8XXX_ADDR_QDR_NET_MAX)) || -		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, -					     QLA8XXX_ADDR_DDR_NET_MAX)))) { -			ret_val = QLA_ERROR; -			goto exit_ms_mem_write_unlock; -		} - -		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO, -						    addr); -		/* Write data */ -		ret_val |= qla4_83xx_wr_reg_indirect(ha, -						     MD_MIU_TEST_AGT_WRDATA_LO, -						     *data++); -		ret_val |= qla4_83xx_wr_reg_indirect(ha, -						     MD_MIU_TEST_AGT_WRDATA_HI, -						     *data++); -		ret_val |= qla4_83xx_wr_reg_indirect(ha, -						     MD_MIU_TEST_AGT_WRDATA_ULO, -						     *data++); -		ret_val |= qla4_83xx_wr_reg_indirect(ha, -						     MD_MIU_TEST_AGT_WRDATA_UHI, -						 
    *data++); -		if (ret_val == QLA_ERROR) { -			ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n", -				   __func__); -			goto exit_ms_mem_write_unlock; -		} - -		/* Check write status */ -		ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, -						    MIU_TA_CTL_WRITE_ENABLE); -		ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, -						     MIU_TA_CTL_WRITE_START); -		if (ret_val == QLA_ERROR) { -			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", -				   __func__); -			goto exit_ms_mem_write_unlock; -		} - -		for (j = 0; j < MAX_CTL_CHECK; j++) { -			ret_val = qla4_83xx_rd_reg_indirect(ha, -							MD_MIU_TEST_AGT_CTRL, -							&agt_ctrl); -			if (ret_val == QLA_ERROR) { -				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", -					   __func__); -				goto exit_ms_mem_write_unlock; -			} -			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) -				break; -		} - -		/* Status check failed */ -		if (j >= MAX_CTL_CHECK) { -			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", -					   __func__); -			ret_val = QLA_ERROR; -			goto exit_ms_mem_write_unlock; -		} -	} - -exit_ms_mem_write_unlock: -	write_unlock_irqrestore(&ha->hw_lock, flags); - -exit_ms_mem_write: -	return ret_val; -} -  #define INTENT_TO_RECOVER	0x01  #define PROCEED_TO_RECOVER	0x02 @@ -465,7 +361,7 @@ int qla4_83xx_drv_lock(struct scsi_qla_host *ha)  				}  				/* Recovery Failed, some other function  				 * has the lock, wait for 2secs and retry */ -				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timout\n", +				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",  					   __func__, ha->func_num);  				timeout = 0;  			} @@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)  			  __func__));  	/* 128 bit/16 byte write to MS memory */ -	ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, +	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,  					      count);  	if (ret_val == QLA_ERROR) {  		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n", @@ -1304,12 +1200,24 @@ static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)  static int qla4_83xx_restart(struct scsi_qla_host *ha)  {  	int ret_val = QLA_SUCCESS; +	uint32_t idc_ctrl;  	qla4_83xx_process_stop_seq(ha); -	/* Collect minidump*/ -	if (!test_and_clear_bit(AF_83XX_NO_FW_DUMP, &ha->flags)) +	/* +	 * Collect minidump. +	 * If IDC_CTRL BIT1 is set, clear it on going to INIT state and +	 * don't collect minidump +	 */ +	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); +	if (idc_ctrl & GRACEFUL_RESET_BIT1) { +		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, +				 (idc_ctrl & ~GRACEFUL_RESET_BIT1)); +		ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n", +			   __func__); +	} else {  		qla4_8xxx_get_minidump(ha); +	}  	qla4_83xx_process_init_seq(ha); @@ -1664,3 +1572,23 @@ void qla4_83xx_disable_pause(struct scsi_qla_host *ha)  	__qla4_83xx_disable_pause(ha);  	ha->isp_ops->idc_unlock(ha);  } + +/** + * qla4_83xx_is_detached - Check if we are marked invisible. + * @ha: Pointer to host adapter structure. 
+ **/ +int qla4_83xx_is_detached(struct scsi_qla_host *ha) +{ +	uint32_t drv_active; + +	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE); + +	if (test_bit(AF_INIT_DONE, &ha->flags) && +	    !(drv_active & (1 << ha->func_num))) { +		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n", +				  __func__, drv_active)); +		return QLA_SUCCESS; +	} + +	return QLA_ERROR; +} diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h index a0de6e25ea5..775fdf9fcc8 100644 --- a/drivers/scsi/qla4xxx/ql4_83xx.h +++ b/drivers/scsi/qla4xxx/ql4_83xx.h @@ -254,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd {  	uint32_t rsvd_1;  }; +struct qla8044_minidump_entry_rddfe { +	struct qla8xxx_minidump_entry_hdr h; +	uint32_t addr_1; +	uint32_t value; +	uint8_t stride; +	uint8_t stride2; +	uint16_t count; +	uint32_t poll; +	uint32_t mask; +	uint32_t modify_mask; +	uint32_t data_size; +	uint32_t rsvd; + +} __packed; + +struct qla8044_minidump_entry_rdmdio { +	struct qla8xxx_minidump_entry_hdr h; + +	uint32_t addr_1; +	uint32_t addr_2; +	uint32_t value_1; +	uint8_t stride_1; +	uint8_t stride_2; +	uint16_t count; +	uint32_t poll; +	uint32_t mask; +	uint32_t value_2; +	uint32_t data_size; + +} __packed; + +struct qla8044_minidump_entry_pollwr { +	struct qla8xxx_minidump_entry_hdr h; +	uint32_t addr_1; +	uint32_t addr_2; +	uint32_t value_1; +	uint32_t value_2; +	uint32_t poll; +	uint32_t mask; +	uint32_t data_size; +	uint32_t rsvd; + +} __packed; +  /* RDMUX2 Entry */  struct qla83xx_minidump_entry_rdmux2 {  	struct qla8xxx_minidump_entry_hdr h; diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c index cf8fdf1d125..9f92cbf9647 100644 --- a/drivers/scsi/qla4xxx/ql4_bsg.c +++ b/drivers/scsi/qla4xxx/ql4_bsg.c @@ -446,6 +446,363 @@ leave:  	return rval;  } +static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job) +{ +	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); +	struct scsi_qla_host *ha = to_qla_host(host); +	struct iscsi_bsg_request *bsg_req = bsg_job->request; +	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; +	uint8_t *rsp_ptr = NULL; +	uint32_t mbox_cmd[MBOX_REG_COUNT]; +	uint32_t mbox_sts[MBOX_REG_COUNT]; +	int status = QLA_ERROR; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + +	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { +		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. 
Invalid Request\n", +			   __func__); +		bsg_reply->result = DID_ERROR << 16; +		goto exit_diag_mem_test; +	} + +	bsg_reply->reply_payload_rcv_len = 0; +	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], +	       sizeof(uint32_t) * MBOX_REG_COUNT); + +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", +			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], +			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], +			  mbox_cmd[7])); + +	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], +					 &mbox_sts[0]); + +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", +			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], +			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], +			  mbox_sts[7])); + +	if (status == QLA_SUCCESS) +		bsg_reply->result = DID_OK << 16; +	else +		bsg_reply->result = DID_ERROR << 16; + +	/* Send mbox_sts to application */ +	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); +	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); +	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); + +exit_diag_mem_test: +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "%s: bsg_reply->result = x%x, status = %s\n", +			  __func__, bsg_reply->result, STATUS(status))); + +	bsg_job_done(bsg_job, bsg_reply->result, +		     bsg_reply->reply_payload_rcv_len); +} + +static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha, +						   int wait_for_link) +{ +	int status = QLA_SUCCESS; + +	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) { +		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout", +			   __func__, ha->idc_extend_tmo); +		if (ha->idc_extend_tmo) { +			if (!wait_for_completion_timeout(&ha->idc_comp, +						(ha->idc_extend_tmo * HZ))) { +				ha->notify_idc_comp = 0; +				ha->notify_link_up_comp = 0; +				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received", +					   __func__); +				status = QLA_ERROR; +				goto exit_wait; +			} else { +				DEBUG2(ql4_printk(KERN_INFO, ha, +						  "%s: IDC Complete notification received\n", +						  __func__)); +			} +		} +	} else { +		DEBUG2(ql4_printk(KERN_INFO, ha, +				  "%s: IDC Complete notification received\n", +				  __func__)); +	} +	ha->notify_idc_comp = 0; + +	if (wait_for_link) { +		if (!wait_for_completion_timeout(&ha->link_up_comp, +						 (IDC_COMP_TOV * HZ))) { +			ha->notify_link_up_comp = 0; +			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received", +				   __func__); +			status = QLA_ERROR; +			goto exit_wait; +		} else { +			DEBUG2(ql4_printk(KERN_INFO, ha, +					  "%s: LINK UP notification received\n", +					  __func__)); +		} +		ha->notify_link_up_comp = 0; +	} + +exit_wait: +	return status; +} + +static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha, +					 uint32_t *mbox_cmd) +{ +	uint32_t config = 0; +	int status = QLA_SUCCESS; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + +	status = qla4_83xx_get_port_config(ha, &config); +	if (status != QLA_SUCCESS) +		goto exit_pre_loopback_config; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n", +			  __func__, config)); + +	if ((config & ENABLE_INTERNAL_LOOPBACK) || +	    (config & ENABLE_EXTERNAL_LOOPBACK)) { +		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. 
Invalid Request\n", +			   __func__); +		goto exit_pre_loopback_config; +	} + +	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) +		config |= ENABLE_INTERNAL_LOOPBACK; + +	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) +		config |= ENABLE_EXTERNAL_LOOPBACK; + +	config &= ~ENABLE_DCBX; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n", +			  __func__, config)); + +	ha->notify_idc_comp = 1; +	ha->notify_link_up_comp = 1; + +	/* get the link state */ +	qla4xxx_get_firmware_state(ha); + +	status = qla4_83xx_set_port_config(ha, &config); +	if (status != QLA_SUCCESS) { +		ha->notify_idc_comp = 0; +		ha->notify_link_up_comp = 0; +		goto exit_pre_loopback_config; +	} +exit_pre_loopback_config: +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, +			  STATUS(status))); +	return status; +} + +static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha, +					  uint32_t *mbox_cmd) +{ +	int status = QLA_SUCCESS; +	uint32_t config = 0; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + +	status = qla4_83xx_get_port_config(ha, &config); +	if (status != QLA_SUCCESS) +		goto exit_post_loopback_config; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__, +			  config)); + +	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK) +		config &= ~ENABLE_INTERNAL_LOOPBACK; +	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK) +		config &= ~ENABLE_EXTERNAL_LOOPBACK; + +	config |= ENABLE_DCBX; + +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "%s: Restore default port config=%08X\n", __func__, +			  config)); + +	ha->notify_idc_comp = 1; +	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) +		ha->notify_link_up_comp = 1; + +	status = qla4_83xx_set_port_config(ha, &config); +	if (status != QLA_SUCCESS) { +		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n", +			   __func__); +		set_bit(DPC_RESET_HA, &ha->dpc_flags); +		clear_bit(AF_LOOPBACK, &ha->flags); +		goto exit_post_loopback_config; +	} + +exit_post_loopback_config: +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__, +			  STATUS(status))); +	return status; +} + +static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job) +{ +	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); +	struct scsi_qla_host *ha = to_qla_host(host); +	struct iscsi_bsg_request *bsg_req = bsg_job->request; +	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; +	uint8_t *rsp_ptr = NULL; +	uint32_t mbox_cmd[MBOX_REG_COUNT]; +	uint32_t mbox_sts[MBOX_REG_COUNT]; +	int wait_for_link = 1; +	int status = QLA_ERROR; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + +	bsg_reply->reply_payload_rcv_len = 0; + +	if (test_bit(AF_LOOPBACK, &ha->flags)) { +		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n", +			   __func__); +		bsg_reply->result = DID_ERROR << 16; +		goto exit_loopback_cmd; +	} + +	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { +		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. 
Invalid Request\n", +			   __func__); +		bsg_reply->result = DID_ERROR << 16; +		goto exit_loopback_cmd; +	} + +	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1], +	       sizeof(uint32_t) * MBOX_REG_COUNT); + +	if (is_qla8032(ha) || is_qla8042(ha)) { +		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd); +		if (status != QLA_SUCCESS) { +			bsg_reply->result = DID_ERROR << 16; +			goto exit_loopback_cmd; +		} + +		status = qla4_83xx_wait_for_loopback_config_comp(ha, +								 wait_for_link); +		if (status != QLA_SUCCESS) { +			bsg_reply->result = DID_TIME_OUT << 16; +			goto restore; +		} +	} + +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", +			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], +			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], +			  mbox_cmd[7])); + +	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0], +				&mbox_sts[0]); + +	if (status == QLA_SUCCESS) +		bsg_reply->result = DID_OK << 16; +	else +		bsg_reply->result = DID_ERROR << 16; + +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", +			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2], +			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], +			  mbox_sts[7])); + +	/* Send mbox_sts to application */ +	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts); +	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply); +	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts)); restore: +	if (is_qla8032(ha) || is_qla8042(ha)) { +		status = qla4_83xx_post_loopback_config(ha, mbox_cmd); +		if (status != QLA_SUCCESS) { +			bsg_reply->result = DID_ERROR << 16; +			goto exit_loopback_cmd; +		} + +		/* for pre_loopback_config() wait for LINK UP only +		 * if PHY LINK is UP */ +		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP)) +			wait_for_link = 0; + +		status = qla4_83xx_wait_for_loopback_config_comp(ha, +								 wait_for_link); +		if (status != QLA_SUCCESS) { +			bsg_reply->result = DID_TIME_OUT << 16; +			goto exit_loopback_cmd; +		} +	} exit_loopback_cmd: +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "%s: bsg_reply->result = x%x, status = %s\n", +			  __func__, bsg_reply->result, STATUS(status))); +	bsg_job_done(bsg_job, bsg_reply->result, +		     bsg_reply->reply_payload_rcv_len); +} + +static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job) +{ +	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); +	struct scsi_qla_host *ha = to_qla_host(host); +	struct iscsi_bsg_request *bsg_req = bsg_job->request; +	uint32_t diag_cmd; +	int rval = -EINVAL; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__)); + +	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; +	if (diag_cmd == MBOX_CMD_DIAG_TEST) { +		switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) { +		case QL_DIAG_CMD_TEST_DDR_SIZE: +		case QL_DIAG_CMD_TEST_DDR_RW: +		case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW: +		case QL_DIAG_CMD_TEST_NVRAM: +		case QL_DIAG_CMD_TEST_FLASH_ROM: +		case QL_DIAG_CMD_TEST_DMA_XFER: +		case QL_DIAG_CMD_SELF_DDR_RW: +		case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW: +			/* Execute diag test for adapter RAM/FLASH */ +			ql4xxx_execute_diag_cmd(bsg_job); +			/* Always return success as we want to send bsg_reply +			 * to Application */ +			rval = QLA_SUCCESS; +			break; + +		case QL_DIAG_CMD_TEST_INT_LOOPBACK: +		case QL_DIAG_CMD_TEST_EXT_LOOPBACK: +			/* Execute diag test for Network */ +			qla4xxx_execute_diag_loopback_cmd(bsg_job); +			/* Always return success as we want to send bsg_reply +			 * to 
Application */ +			rval = QLA_SUCCESS; +			break; +		default: +			ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n", +				   __func__, +				   bsg_req->rqst_data.h_vendor.vendor_cmd[2]); +		} +	} else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) || +		   (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) { +		ql4xxx_execute_diag_cmd(bsg_job); +		rval = QLA_SUCCESS; +	} else { +		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n", +			   __func__, diag_cmd); +	} + +	return rval; +} +  /**   * qla4xxx_process_vendor_specific - handle vendor specific bsg request   * @job: iscsi_bsg_job to handle @@ -479,6 +836,9 @@ int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)  	case QLISCSI_VND_GET_ACB:  		return qla4xxx_bsg_get_acb(bsg_job); +	case QLISCSI_VND_DIAG_TEST: +		return qla4xxx_execute_diag_test(bsg_job); +  	default:  		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "  			   "0x%x\n", __func__, bsg_req->msgcode); diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h index c6a0364509f..88c2401910c 100644 --- a/drivers/scsi/qla4xxx/ql4_bsg.h +++ b/drivers/scsi/qla4xxx/ql4_bsg.h @@ -15,5 +15,18 @@  #define QLISCSI_VND_UPDATE_NVRAM	5  #define QLISCSI_VND_RESTORE_DEFAULTS	6  #define QLISCSI_VND_GET_ACB		7 +#define QLISCSI_VND_DIAG_TEST		8 + +/* QLISCSI_VND_DIAG_CMD sub code */ +#define QL_DIAG_CMD_TEST_DDR_SIZE	0x2 +#define QL_DIAG_CMD_TEST_DDR_RW		0x3 +#define QL_DIAG_CMD_TEST_ONCHIP_MEM_RW	0x4 +#define QL_DIAG_CMD_TEST_NVRAM		0x5	/* Only ISP4XXX */ +#define QL_DIAG_CMD_TEST_FLASH_ROM	0x6 +#define QL_DIAG_CMD_TEST_INT_LOOPBACK	0x7 +#define QL_DIAG_CMD_TEST_EXT_LOOPBACK	0x8 +#define QL_DIAG_CMD_TEST_DMA_XFER	0x9	/* Only ISP4XXX */ +#define QL_DIAG_CMD_SELF_DDR_RW		0xC +#define QL_DIAG_CMD_SELF_ONCHIP_MEM_RW	0xD  #endif diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 41327d46ecf..8f6d0fb2cd8 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -73,6 +73,7 @@  #define QLA_SUCCESS			0  #define QLA_ERROR			1 +#define STATUS(status)		status == QLA_ERROR ? "FAILED" : "SUCCEEDED"  /*   * Data bit definitions @@ -179,6 +180,10 @@  		n &= ~v;	\  } +#define OP_STATE(o, f, p) {			\ +	p = (o & f) ? 
"enable" : "disable";	\ +} +  /*   * Retry & Timeout Values   */ @@ -189,7 +194,7 @@  #define ADAPTER_INIT_TOV		30  #define ADAPTER_RESET_TOV		180  #define EXTEND_CMD_TOV			60 -#define WAIT_CMD_TOV			30 +#define WAIT_CMD_TOV			5  #define EH_WAIT_CMD_TOV			120  #define FIRMWARE_UP_TOV			60  #define RESET_FIRMWARE_TOV		30 @@ -206,6 +211,8 @@  #define MAX_RESET_HA_RETRIES		2  #define FW_ALIVE_WAIT_TOV		3  #define IDC_EXTEND_TOV			8 +#define IDC_COMP_TOV			5 +#define LINK_UP_COMP_TOV		30  #define CMD_SP(Cmnd)			((Cmnd)->SCp.ptr) @@ -290,6 +297,8 @@ struct ddb_entry {  	/* Driver Re-login  */  	unsigned long flags;		  /* DDB Flags */ +#define DDB_CONN_CLOSE_FAILURE		0 /* 0x00000001 */ +  	uint16_t default_relogin_timeout; /*  Max time to wait for  					   *  relogin to complete */  	atomic_t retry_relogin_timer;	  /* Min Time between relogins @@ -306,6 +315,7 @@ struct ddb_entry {  struct qla_ddb_index {  	struct list_head list;  	uint16_t fw_ddb_idx; +	uint16_t flash_ddb_idx;  	struct dev_db_entry fw_ddb;  	uint8_t flash_isid[6];  }; @@ -475,6 +485,34 @@ struct ipaddress_config {  	uint16_t eth_mtu_size;  	uint16_t ipv4_port;  	uint16_t ipv6_port; +	uint8_t control; +	uint16_t ipv6_tcp_options; +	uint8_t tcp_wsf; +	uint8_t ipv6_tcp_wsf; +	uint8_t ipv4_tos; +	uint8_t ipv4_cache_id; +	uint8_t ipv6_cache_id; +	uint8_t ipv4_alt_cid_len; +	uint8_t ipv4_alt_cid[11]; +	uint8_t ipv4_vid_len; +	uint8_t ipv4_vid[11]; +	uint8_t ipv4_ttl; +	uint16_t ipv6_flow_lbl; +	uint8_t ipv6_traffic_class; +	uint8_t ipv6_hop_limit; +	uint32_t ipv6_nd_reach_time; +	uint32_t ipv6_nd_rexmit_timer; +	uint32_t ipv6_nd_stale_timeout; +	uint8_t ipv6_dup_addr_detect_count; +	uint32_t ipv6_gw_advrt_mtu; +	uint16_t def_timeout; +	uint8_t abort_timer; +	uint16_t iscsi_options; +	uint16_t iscsi_max_pdu_size; +	uint16_t iscsi_first_burst_len; +	uint16_t iscsi_max_outstnd_r2t; +	uint16_t iscsi_max_burst_len; +	uint8_t iscsi_name[224];  };  #define QL4_CHAP_MAX_NAME_LEN 256 @@ -544,7 +582,6 @@ struct scsi_qla_host {  #define AF_82XX_FW_DUMPED		24 /* 0x01000000 */  #define AF_8XXX_RST_OWNER		25 /* 0x02000000 */  #define AF_82XX_DUMP_READING		26 /* 0x04000000 */ -#define AF_83XX_NO_FW_DUMP		27 /* 0x08000000 */  #define AF_83XX_IOCB_INTR_ON		28 /* 0x10000000 */  #define AF_83XX_MBOX_INTR_ON		29 /* 0x20000000 */ @@ -559,11 +596,12 @@ struct scsi_qla_host {  #define DPC_AEN				9 /* 0x00000200 */  #define DPC_GET_DHCP_IP_ADDR		15 /* 0x00008000 */  #define DPC_LINK_CHANGED		18 /* 0x00040000 */ -#define DPC_RESET_ACTIVE		20 /* 0x00040000 */ -#define DPC_HA_UNRECOVERABLE		21 /* 0x00080000 ISP-82xx only*/ -#define DPC_HA_NEED_QUIESCENT		22 /* 0x00100000 ISP-82xx only*/ -#define DPC_POST_IDC_ACK		23 /* 0x00200000 */ +#define DPC_RESET_ACTIVE		20 /* 0x00100000 */ +#define DPC_HA_UNRECOVERABLE		21 /* 0x00200000 ISP-82xx only*/ +#define DPC_HA_NEED_QUIESCENT		22 /* 0x00400000 ISP-82xx only*/ +#define DPC_POST_IDC_ACK		23 /* 0x00800000 */  #define DPC_RESTORE_ACB			24 /* 0x01000000 */ +#define DPC_SYSFS_DDB_EXPORT		25 /* 0x02000000 */  	struct Scsi_Host *host; /* pointer to host data */  	uint32_t tot_ddbs; @@ -732,6 +770,7 @@ struct scsi_qla_host {  	uint32_t fw_dump_capture_mask;  	void *fw_dump_tmplt_hdr;  	uint32_t fw_dump_tmplt_size; +	uint32_t fw_dump_skip_size;  	struct completion mbx_intr_comp; @@ -789,6 +828,11 @@ struct scsi_qla_host {  	uint32_t pf_bit;  	struct qla4_83xx_idc_information idc_info;  	struct addr_ctrl_blk *saved_acb; +	int notify_idc_comp; +	int notify_link_up_comp; +	int idc_extend_tmo; +	struct completion idc_comp; +	
struct completion link_up_comp;  };  struct ql4_task_data { @@ -869,7 +913,8 @@ static inline int is_qla80XX(struct scsi_qla_host *ha)  static inline int is_aer_supported(struct scsi_qla_host *ha)  {  	return ((ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022) || -		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324)); +		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324) || +		(ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042));  }  static inline int adapter_up(struct scsi_qla_host *ha) diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 51d1a70f8b4..699575efc9b 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -390,6 +390,7 @@ struct qla_flt_region {  #define MBOX_CMD_CLEAR_DATABASE_ENTRY		0x0031  #define MBOX_CMD_CONN_OPEN			0x0074  #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT		0x0056 +#define DDB_NOT_LOGGED_IN			0x09  #define LOGOUT_OPTION_CLOSE_SESSION		0x0002  #define LOGOUT_OPTION_RELOGIN			0x0004  #define LOGOUT_OPTION_FREE_DDB			0x0008 @@ -410,6 +411,7 @@ struct qla_flt_region {  #define DDB_DS_LOGIN_IN_PROCESS			0x07  #define MBOX_CMD_GET_FW_STATE			0x0069  #define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A +#define MBOX_CMD_DIAG_TEST			0x0075  #define MBOX_CMD_GET_SYS_INFO			0x0078  #define MBOX_CMD_GET_NVRAM			0x0078	/* For 40xx */  #define MBOX_CMD_SET_NVRAM			0x0079	/* For 40xx */ @@ -425,8 +427,17 @@ struct qla_flt_region {  #define MBOX_CMD_GET_IP_ADDR_STATE		0x0091  #define MBOX_CMD_SEND_IPV6_ROUTER_SOL		0x0092  #define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR	0x0093 +#define MBOX_CMD_SET_PORT_CONFIG		0x0122 +#define MBOX_CMD_GET_PORT_CONFIG		0x0123 +#define MBOX_CMD_SET_LED_CONFIG			0x0125 +#define MBOX_CMD_GET_LED_CONFIG			0x0126  #define MBOX_CMD_MINIDUMP			0x0129 +/* Port Config */ +#define ENABLE_INTERNAL_LOOPBACK		0x04 +#define ENABLE_EXTERNAL_LOOPBACK		0x08 +#define ENABLE_DCBX				0x10 +  /* Minidump subcommand */  #define MINIDUMP_GET_SIZE_SUBCOMMAND		0x00  #define MINIDUMP_GET_TMPLT_SUBCOMMAND		0x01 @@ -495,9 +506,9 @@ struct qla_flt_region {  #define MBOX_ASTS_RESPONSE_QUEUE_FULL		0x8028  #define MBOX_ASTS_IP_ADDR_STATE_CHANGED		0x8029  #define MBOX_ASTS_IPV6_DEFAULT_ROUTER_CHANGED	0x802A -#define MBOX_ASTS_IPV6_PREFIX_EXPIRED		0x802B -#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED	0x802C -#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED	0x802D +#define MBOX_ASTS_IPV6_LINK_MTU_CHANGE		0x802B +#define MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED	0x802C +#define MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED	0x802D  #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD		0x802E  #define MBOX_ASTS_INITIALIZATION_FAILED		0x8031  #define MBOX_ASTS_SYSTEM_WARNING_EVENT		0x8036 @@ -518,14 +529,14 @@ struct qla_flt_region {  #define ACB_CONFIG_DISABLE		0x00  #define ACB_CONFIG_SET			0x01 -/* ACB State Defines */ -#define ACB_STATE_UNCONFIGURED	0x00 -#define ACB_STATE_INVALID	0x01 -#define ACB_STATE_ACQUIRING	0x02 -#define ACB_STATE_TENTATIVE	0x03 -#define ACB_STATE_DEPRICATED	0x04 -#define ACB_STATE_VALID		0x05 -#define ACB_STATE_DISABLING	0x06 +/* ACB/IP Address State Defines */ +#define IP_ADDRSTATE_UNCONFIGURED	0 +#define IP_ADDRSTATE_INVALID		1 +#define IP_ADDRSTATE_ACQUIRING		2 +#define IP_ADDRSTATE_TENTATIVE		3 +#define IP_ADDRSTATE_DEPRICATED		4 +#define IP_ADDRSTATE_PREFERRED		5 +#define IP_ADDRSTATE_DISABLING		6  /* FLASH offsets */  #define FLASH_SEGMENT_IFCB	0x04000000 @@ -535,9 +546,9 @@ struct qla_flt_region {  #define FLASH_OPT_COMMIT	2  #define FLASH_OPT_RMW_COMMIT	3 -/* Loopback type */ -#define ENABLE_INTERNAL_LOOPBACK	0x04 -#define 
ENABLE_EXTERNAL_LOOPBACK	0x08 +/* generic defines to enable/disable params */ +#define QL4_PARAM_DISABLE	0 +#define QL4_PARAM_ENABLE	1  /*************************************************************************/ @@ -547,6 +558,7 @@ struct addr_ctrl_blk {  #define  IFCB_VER_MIN			0x01  #define  IFCB_VER_MAX			0x02  	uint8_t control;	/* 01 */ +#define	 CTRLOPT_NEW_CONN_DISABLE	0x0002  	uint16_t fw_options;	/* 02-03 */  #define	 FWOPT_HEARTBEAT_ENABLE		  0x1000 @@ -578,11 +590,40 @@ struct addr_ctrl_blk {  	uint32_t shdwreg_addr_hi;	/* 2C-2F */  	uint16_t iscsi_opts;	/* 30-31 */ +#define ISCSIOPTS_HEADER_DIGEST_EN		0x2000 +#define ISCSIOPTS_DATA_DIGEST_EN		0x1000 +#define ISCSIOPTS_IMMEDIATE_DATA_EN		0x0800 +#define ISCSIOPTS_INITIAL_R2T_EN		0x0400 +#define ISCSIOPTS_DATA_SEQ_INORDER_EN		0x0200 +#define ISCSIOPTS_DATA_PDU_INORDER_EN		0x0100 +#define ISCSIOPTS_CHAP_AUTH_EN			0x0080 +#define ISCSIOPTS_SNACK_EN			0x0040 +#define ISCSIOPTS_DISCOVERY_LOGOUT_EN		0x0020 +#define ISCSIOPTS_BIDI_CHAP_EN			0x0010 +#define ISCSIOPTS_DISCOVERY_AUTH_EN		0x0008 +#define ISCSIOPTS_STRICT_LOGIN_COMP_EN		0x0004 +#define ISCSIOPTS_ERL				0x0003  	uint16_t ipv4_tcp_opts;	/* 32-33 */ +#define TCPOPT_DELAYED_ACK_DISABLE	0x8000  #define TCPOPT_DHCP_ENABLE		0x0200 +#define TCPOPT_DNS_SERVER_IP_EN		0x0100 +#define TCPOPT_SLP_DA_INFO_EN		0x0080 +#define TCPOPT_NAGLE_ALGO_DISABLE	0x0020 +#define TCPOPT_WINDOW_SCALE_DISABLE	0x0010 +#define TCPOPT_TIMER_SCALE		0x000E +#define TCPOPT_TIMESTAMP_ENABLE		0x0001  	uint16_t ipv4_ip_opts;	/* 34-35 */  #define IPOPT_IPV4_PROTOCOL_ENABLE	0x8000 +#define IPOPT_IPV4_TOS_EN		0x4000  #define IPOPT_VLAN_TAGGING_ENABLE	0x2000 +#define IPOPT_GRAT_ARP_EN		0x1000 +#define IPOPT_ALT_CID_EN		0x0800 +#define IPOPT_REQ_VID_EN		0x0400 +#define IPOPT_USE_VID_EN		0x0200 +#define IPOPT_LEARN_IQN_EN		0x0100 +#define IPOPT_FRAGMENTATION_DISABLE	0x0010 +#define IPOPT_IN_FORWARD_EN		0x0008 +#define IPOPT_ARP_REDIRECT_EN		0x0004  	uint16_t iscsi_max_pdu_size;	/* 36-37 */  	uint8_t ipv4_tos;	/* 38 */ @@ -633,15 +674,24 @@ struct addr_ctrl_blk {  	uint32_t cookie;	/* 200-203 */  	uint16_t ipv6_port;	/* 204-205 */  	uint16_t ipv6_opts;	/* 206-207 */ -#define IPV6_OPT_IPV6_PROTOCOL_ENABLE	0x8000 -#define IPV6_OPT_VLAN_TAGGING_ENABLE	0x2000 +#define IPV6_OPT_IPV6_PROTOCOL_ENABLE		0x8000 +#define IPV6_OPT_VLAN_TAGGING_ENABLE		0x2000 +#define IPV6_OPT_GRAT_NEIGHBOR_ADV_EN		0x1000 +#define IPV6_OPT_REDIRECT_EN			0x0004  	uint16_t ipv6_addtl_opts;	/* 208-209 */ +#define IPV6_ADDOPT_IGNORE_ICMP_ECHO_REQ		0x0040 +#define IPV6_ADDOPT_MLD_EN				0x0004  #define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE	0x0002 /* Pri ACB  								  Only */  #define IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR		0x0001  	uint16_t ipv6_tcp_opts;	/* 20A-20B */ +#define IPV6_TCPOPT_DELAYED_ACK_DISABLE		0x8000 +#define IPV6_TCPOPT_NAGLE_ALGO_DISABLE		0x0020 +#define IPV6_TCPOPT_WINDOW_SCALE_DISABLE	0x0010 +#define IPV6_TCPOPT_TIMER_SCALE			0x000E +#define IPV6_TCPOPT_TIMESTAMP_EN		0x0001  	uint8_t ipv6_tcp_wsf;	/* 20C */  	uint16_t ipv6_flow_lbl;	/* 20D-20F */  	uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */ @@ -649,14 +699,6 @@ struct addr_ctrl_blk {  	uint8_t ipv6_lnk_lcl_addr_state;/* 222 */  	uint8_t ipv6_addr0_state;	/* 223 */  	uint8_t ipv6_addr1_state;	/* 224 */ -#define IP_ADDRSTATE_UNCONFIGURED	0 -#define IP_ADDRSTATE_INVALID		1 -#define IP_ADDRSTATE_ACQUIRING		2 -#define IP_ADDRSTATE_TENTATIVE		3 -#define IP_ADDRSTATE_DEPRICATED		4 -#define IP_ADDRSTATE_PREFERRED		5 -#define IP_ADDRSTATE_DISABLING		6 -  	uint8_t ipv6_dflt_rtr_state;  
  /* 225 */  #define IPV6_RTRSTATE_UNKNOWN                   0  #define IPV6_RTRSTATE_MANUAL                    1 @@ -1248,7 +1290,88 @@ struct response {  };  struct ql_iscsi_stats { -	uint8_t reserved1[656]; /* 0000-028F */ +	uint64_t mac_tx_frames; /* 0000-0007 */ +	uint64_t mac_tx_bytes; /* 0008-000F */ +	uint64_t mac_tx_multicast_frames; /* 0010-0017 */ +	uint64_t mac_tx_broadcast_frames; /* 0018-001F */ +	uint64_t mac_tx_pause_frames; /* 0020-0027 */ +	uint64_t mac_tx_control_frames; /* 0028-002F */ +	uint64_t mac_tx_deferral; /* 0030-0037 */ +	uint64_t mac_tx_excess_deferral; /* 0038-003F */ +	uint64_t mac_tx_late_collision; /* 0040-0047 */ +	uint64_t mac_tx_abort; /* 0048-004F */ +	uint64_t mac_tx_single_collision; /* 0050-0057 */ +	uint64_t mac_tx_multiple_collision; /* 0058-005F */ +	uint64_t mac_tx_collision; /* 0060-0067 */ +	uint64_t mac_tx_frames_dropped; /* 0068-006F */ +	uint64_t mac_tx_jumbo_frames; /* 0070-0077 */ +	uint64_t mac_rx_frames; /* 0078-007F */ +	uint64_t mac_rx_bytes; /* 0080-0087 */ +	uint64_t mac_rx_unknown_control_frames; /* 0088-008F */ +	uint64_t mac_rx_pause_frames; /* 0090-0097 */ +	uint64_t mac_rx_control_frames; /* 0098-009F */ +	uint64_t mac_rx_dribble; /* 00A0-00A7 */ +	uint64_t mac_rx_frame_length_error; /* 00A8-00AF */ +	uint64_t mac_rx_jabber; /* 00B0-00B7 */ +	uint64_t mac_rx_carrier_sense_error; /* 00B8-00BF */ +	uint64_t mac_rx_frame_discarded; /* 00C0-00C7 */ +	uint64_t mac_rx_frames_dropped; /* 00C8-00CF */ +	uint64_t mac_crc_error; /* 00D0-00D7 */ +	uint64_t mac_encoding_error; /* 00D8-00DF */ +	uint64_t mac_rx_length_error_large; /* 00E0-00E7 */ +	uint64_t mac_rx_length_error_small; /* 00E8-00EF */ +	uint64_t mac_rx_multicast_frames; /* 00F0-00F7 */ +	uint64_t mac_rx_broadcast_frames; /* 00F8-00FF */ +	uint64_t ip_tx_packets; /* 0100-0107 */ +	uint64_t ip_tx_bytes; /* 0108-010F */ +	uint64_t ip_tx_fragments; /* 0110-0117 */ +	uint64_t ip_rx_packets; /* 0118-011F */ +	uint64_t ip_rx_bytes; /* 0120-0127 */ +	uint64_t ip_rx_fragments; /* 0128-012F */ +	uint64_t ip_datagram_reassembly; /* 0130-0137 */ +	uint64_t ip_invalid_address_error; /* 0138-013F */ +	uint64_t ip_error_packets; /* 0140-0147 */ +	uint64_t ip_fragrx_overlap; /* 0148-014F */ +	uint64_t ip_fragrx_outoforder; /* 0150-0157 */ +	uint64_t ip_datagram_reassembly_timeout; /* 0158-015F */ +	uint64_t ipv6_tx_packets; /* 0160-0167 */ +	uint64_t ipv6_tx_bytes; /* 0168-016F */ +	uint64_t ipv6_tx_fragments; /* 0170-0177 */ +	uint64_t ipv6_rx_packets; /* 0178-017F */ +	uint64_t ipv6_rx_bytes; /* 0180-0187 */ +	uint64_t ipv6_rx_fragments; /* 0188-018F */ +	uint64_t ipv6_datagram_reassembly; /* 0190-0197 */ +	uint64_t ipv6_invalid_address_error; /* 0198-019F */ +	uint64_t ipv6_error_packets; /* 01A0-01A7 */ +	uint64_t ipv6_fragrx_overlap; /* 01A8-01AF */ +	uint64_t ipv6_fragrx_outoforder; /* 01B0-01B7 */ +	uint64_t ipv6_datagram_reassembly_timeout; /* 01B8-01BF */ +	uint64_t tcp_tx_segments; /* 01C0-01C7 */ +	uint64_t tcp_tx_bytes; /* 01C8-01CF */ +	uint64_t tcp_rx_segments; /* 01D0-01D7 */ +	uint64_t tcp_rx_byte; /* 01D8-01DF */ +	uint64_t tcp_duplicate_ack_retx; /* 01E0-01E7 */ +	uint64_t tcp_retx_timer_expired; /* 01E8-01EF */ +	uint64_t tcp_rx_duplicate_ack; /* 01F0-01F7 */ +	uint64_t tcp_rx_pure_ackr; /* 01F8-01FF */ +	uint64_t tcp_tx_delayed_ack; /* 0200-0207 */ +	uint64_t tcp_tx_pure_ack; /* 0208-020F */ +	uint64_t tcp_rx_segment_error; /* 0210-0217 */ +	uint64_t tcp_rx_segment_outoforder; /* 0218-021F */ +	uint64_t tcp_rx_window_probe; /* 0220-0227 */ +	uint64_t tcp_rx_window_update; /* 0228-022F */ +	uint64_t tcp_tx_window_probe_persist; /* 0230-0237 */ +	uint64_t ecc_error_correction; /* 0238-023F */ +	uint64_t iscsi_pdu_tx; /* 0240-0247 */ +	uint64_t iscsi_data_bytes_tx; /* 0248-024F */ +	uint64_t iscsi_pdu_rx; /* 0250-0257 */ +	uint64_t iscsi_data_bytes_rx; /* 0258-025F */ +	uint64_t iscsi_io_completed; /* 0260-0267 */ +	uint64_t iscsi_unexpected_io_rx; /* 0268-026F */ +	uint64_t iscsi_format_error; /* 0270-0277 */ +	uint64_t iscsi_hdr_digest_error; /* 0278-027F */ +	uint64_t iscsi_data_digest_error; /* 0280-0287 */ +	uint64_t iscsi_sequence_error; /* 0288-028F */  	uint32_t tx_cmd_pdu; /* 0290-0293 */  	uint32_t tx_resp_pdu; /* 0294-0297 */  	uint32_t rx_cmd_pdu; /* 0298-029B */ @@ -1292,6 +1415,9 @@ struct ql_iscsi_stats {  #define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN	16  #define QLA83XX_SS_OCM_WNDREG_INDEX		3  #define QLA83XX_SS_PCI_INDEX			0 +#define QLA8022_TEMPLATE_CAP_OFFSET		172 +#define QLA83XX_TEMPLATE_CAP_OFFSET		268 +#define QLA80XX_TEMPLATE_RESERVED_BITS		16  struct qla4_8xxx_minidump_template_hdr {  	uint32_t entry_type; @@ -1311,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr {  	uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];  	uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];  	uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN]; +	uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS];  };  #endif /*  _QLA4X_FW_H */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index e6f2a2669db..5f58b451327 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -83,6 +83,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,  		uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);  int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,  			   char *password, int bidi, uint16_t *chap_index); +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, +		     uint16_t idx, int bidi);  void qla4xxx_queue_iocb(struct scsi_qla_host *ha);  void qla4xxx_complete_iocb(struct scsi_qla_host *ha); @@ -272,8 +274,14 @@ int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,  int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,  		    uint32_t acb_type, uint32_t len);  int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config); -int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,  				uint64_t addr, uint32_t *data, uint32_t count); +uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state); +int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config); +int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config); +int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha); +int qla4_83xx_is_detached(struct scsi_qla_host *ha); +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);  extern int ql4xextended_error_logging;  extern int ql4xdontresethba; diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 7456eeb2e58..6f12f859b11 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)  	return ipv4_wait|ipv6_wait;  } +static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha, +		struct qla4_8xxx_minidump_template_hdr *md_hdr) +{ +	int offset = (is_qla8022(ha)) ? 
QLA8022_TEMPLATE_CAP_OFFSET : +					QLA83XX_TEMPLATE_CAP_OFFSET; +	int rval = 1; +	uint32_t *cap_offset; + +	cap_offset = (uint32_t *)((char *)md_hdr + offset); + +	if (!(le32_to_cpu(*cap_offset) & BIT_0)) { +		ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n", +			   *cap_offset); +		rval = 0; +	} + +	return rval; +} +  /**   * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.   * @ha: pointer to host adapter structure. @@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)  	void *md_tmp;  	dma_addr_t md_tmp_dma;  	struct qla4_8xxx_minidump_template_hdr *md_hdr; +	int dma_capable;  	if (ha->fw_dump) {  		ql4_printk(KERN_WARNING, ha, @@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)  	md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; +	dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr); +  	capture_debug_level = md_hdr->capture_debug_level;  	/* Get capture mask based on module loadtime setting. */ -	if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) +	if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) || +	    (ql4xmdcapmask == 0xFF && dma_capable))  {  		ha->fw_dump_capture_mask = ql4xmdcapmask; -	else +	} else { +		if (ql4xmdcapmask == 0xFF) +			ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n");  		ha->fw_dump_capture_mask = capture_debug_level; +	}  	md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; @@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)  	if (status == QLA_SUCCESS) {  		if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))  			qla4xxx_get_crash_record(ha); + +		qla4xxx_init_rings(ha);  	} else {  		DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",  			     ha->host_no, __func__)); @@ -959,13 +987,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)  		qla4xxx_build_ddb_list(ha, is_reset);  	set_bit(AF_ONLINE, &ha->flags); -exit_init_hba: -	if (is_qla80XX(ha) && (status == QLA_ERROR)) { -		/* Since interrupts are registered in start_firmware for -		 * 80XX, release them here if initialize_adapter fails */ -		qla4xxx_free_irqs(ha); -	} +exit_init_hba:  	DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,  	    status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED"));  	return status; diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h index 8503ad643bd..655b7bb644d 100644 --- a/drivers/scsi/qla4xxx/ql4_inline.h +++ b/drivers/scsi/qla4xxx/ql4_inline.h @@ -82,3 +82,15 @@ qla4xxx_disable_intrs(struct scsi_qla_host *ha)  	__qla4xxx_disable_intrs(ha);  	spin_unlock_irqrestore(&ha->hardware_lock, flags);  } + +static inline int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry) +{ +	int type; + +	if (chap_entry->flags & BIT_7) +		type = LOCAL_CHAP; +	else +		type = BIDI_CHAP; + +	return type; +} diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 7dff09f09b7..081b6b78d2c 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -385,9 +385,9 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,  	cls_conn = ddb_entry->conn;  	conn = cls_conn->dd_data; -	spin_lock(&conn->session->lock); +	spin_lock(&conn->session->back_lock);  	task = iscsi_itt_to_task(conn, itt); -	spin_unlock(&conn->session->lock); +	spin_unlock(&conn->session->back_lock);  	if (task == NULL) {  		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__); @@ -606,6 +606,48 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)  	return rval;  } +static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha, +					uint32_t ipaddr_idx, +					uint32_t ipaddr_fw_state) +{ +	uint8_t ipaddr_state; +	uint8_t ip_idx; + +	ip_idx = ipaddr_idx & 0xF; +	ipaddr_state = qla4xxx_set_ipaddr_state((uint8_t)ipaddr_fw_state); + +	switch (ip_idx) { +	case 0: +		ha->ip_config.ipv4_addr_state = ipaddr_state; +		break; +	case 1: +		ha->ip_config.ipv6_link_local_state = ipaddr_state; +		break; +	case 2: +		ha->ip_config.ipv6_addr0_state = ipaddr_state; +		break; +	case 3: +		ha->ip_config.ipv6_addr1_state = ipaddr_state; +		break; +	default: +		ql4_printk(KERN_INFO, ha, "%s: Invalid IPADDR index %d\n", +			   __func__, ip_idx); +	} +} + +static void qla4xxx_default_router_changed(struct scsi_qla_host *ha, +					   uint32_t *mbox_sts) +{ +	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[0], +	       &mbox_sts[2], sizeof(uint32_t)); +	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[1], +	       &mbox_sts[3], sizeof(uint32_t)); +	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[2], +	       &mbox_sts[4], sizeof(uint32_t)); +	memcpy(&ha->ip_config.ipv6_default_router_addr.s6_addr32[3], +	       &mbox_sts[5], sizeof(uint32_t)); +} +  /**   * qla4xxx_isr_decode_mailbox - decodes mailbox status   * @ha: Pointer to host adapter structure. 
@@ -620,6 +662,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  	int i;  	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];  	__le32 __iomem *mailbox_out; +	uint32_t opcode = 0;  	if (is_qla8032(ha) || is_qla8042(ha))  		mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0]; @@ -698,6 +741,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,  					      sizeof(mbox_sts),  					      (uint8_t *) mbox_sts); + +			if ((is_qla8032(ha) || is_qla8042(ha)) && +			    ha->notify_link_up_comp) +				complete(&ha->link_up_comp); +  			break;  		case MBOX_ASTS_LINK_DOWN: @@ -741,29 +789,48 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  			    "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],  			    mbox_sts[2], mbox_sts[3]); +			qla4xxx_update_ipaddr_state(ha, mbox_sts[5], +						    mbox_sts[3]);  			/* mbox_sts[2] = Old ACB state  			 * mbox_sts[3] = new ACB state */ -			if ((mbox_sts[3] == ACB_STATE_VALID) && -			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) || -			    (mbox_sts[2] == ACB_STATE_ACQUIRING))) { +			if ((mbox_sts[3] == IP_ADDRSTATE_PREFERRED) && +			    ((mbox_sts[2] == IP_ADDRSTATE_TENTATIVE) || +			     (mbox_sts[2] == IP_ADDRSTATE_ACQUIRING))) {  				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags); -			} else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && -				   (mbox_sts[2] == ACB_STATE_VALID)) { +			} else if ((mbox_sts[3] == IP_ADDRSTATE_ACQUIRING) && +				   (mbox_sts[2] == IP_ADDRSTATE_PREFERRED)) {  				if (is_qla80XX(ha))  					set_bit(DPC_RESET_HA_FW_CONTEXT,  						&ha->dpc_flags);  				else  					set_bit(DPC_RESET_HA, &ha->dpc_flags); -			} else if (mbox_sts[3] == ACB_STATE_DISABLING) { +			} else if (mbox_sts[3] == IP_ADDRSTATE_DISABLING) {  				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",  					   ha->host_no, __func__); -			} else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) { +			} else if (mbox_sts[3] == IP_ADDRSTATE_UNCONFIGURED) {  				complete(&ha->disable_acb_comp);  				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",  					   ha->host_no, __func__);  			}  			break; +		case MBOX_ASTS_IPV6_LINK_MTU_CHANGE: +		case MBOX_ASTS_IPV6_AUTO_PREFIX_IGNORED: +		case MBOX_ASTS_IPV6_ND_LOCAL_PREFIX_IGNORED: +			/* No action */ +			DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: AEN %04x\n", +					  ha->host_no, mbox_status)); +			break; + +		case MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD: +			DEBUG2(ql4_printk(KERN_INFO, ha, +					  "scsi%ld: AEN %04x, IPv6 ERROR, " +					  "mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x, mbox_sts[5]=%08x\n", +					  ha->host_no, mbox_sts[0], mbox_sts[1], +					  mbox_sts[2], mbox_sts[3], mbox_sts[4], +					  mbox_sts[5])); +			break; +  		case MBOX_ASTS_MAC_ADDRESS_CHANGED:  		case MBOX_ASTS_DNS:  			/* No action */ @@ -841,8 +908,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  			break;  		case MBOX_ASTS_IDC_REQUEST_NOTIFICATION: -		{ -			uint32_t opcode;  			if (is_qla8032(ha) || is_qla8042(ha)) {  				DEBUG2(ql4_printk(KERN_INFO, ha,  						  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", @@ -862,7 +927,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  				}  			}  			break; -		}  		case MBOX_ASTS_IDC_COMPLETE:  			if (is_qla8032(ha) || is_qla8042(ha)) { @@ -875,6 +939,14 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  						  "scsi%ld: AEN %04x IDC Complete notification\n",  						  
ha->host_no, mbox_sts[0])); +				opcode = mbox_sts[1] >> 16; +				if (ha->notify_idc_comp) +					complete(&ha->idc_comp); + +				if ((opcode == MBOX_CMD_SET_PORT_CONFIG) || +				    (opcode == MBOX_CMD_PORT_RESET)) +					ha->idc_info.info2 = mbox_sts[3]; +  				if (qla4_83xx_loopback_in_progress(ha)) {  					set_bit(AF_LOOPBACK, &ha->flags);  				} else { @@ -896,6 +968,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  			DEBUG2(ql4_printk(KERN_INFO, ha,  					  "scsi%ld: AEN %04x Received IPv6 default router changed notification\n",  					  ha->host_no, mbox_sts[0])); +			qla4xxx_default_router_changed(ha, mbox_sts);  			break;  		case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION: @@ -907,6 +980,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,  			DEBUG2(ql4_printk(KERN_INFO, ha,  					  "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",  					  ha->host_no, mbox_sts[0])); +			/* new IDC timeout */ +			ha->idc_extend_tmo = mbox_sts[1];  			break;  		case MBOX_ASTS_INITIALIZATION_FAILED: @@ -977,7 +1052,8 @@ void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,      uint32_t intr_status)  {  	/* Process response queue interrupt. */ -	if (intr_status & HSRX_RISC_IOCB_INT) +	if ((intr_status & HSRX_RISC_IOCB_INT) && +	    test_bit(AF_INIT_DONE, &ha->flags))  		qla4xxx_process_response_queue(ha);  	/* Process mailbox/asynch event interrupt.*/ @@ -1354,6 +1430,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)  {  	struct scsi_qla_host *ha = dev_id;  	unsigned long flags; +	int intr_status;  	uint32_t ival = 0;  	spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1367,8 +1444,15 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)  		qla4xxx_process_response_queue(ha);  		writel(0, &ha->qla4_83xx_reg->iocb_int_mask);  	} else { -		qla4xxx_process_response_queue(ha); -		writel(0, &ha->qla4_82xx_reg->host_int); +		intr_status = readl(&ha->qla4_82xx_reg->host_status); +		if (intr_status & HSRX_RISC_IOCB_INT) { +			qla4xxx_process_response_queue(ha); +			writel(0, &ha->qla4_82xx_reg->host_int); +		} else { +			ql4_printk(KERN_INFO, ha, "%s: spurious iocb interrupt...\n", +				   __func__); +			goto exit_msix_rsp_q; +		}  	}  	ha->isr_count++;  exit_msix_rsp_q: @@ -1442,7 +1526,8 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)  int qla4xxx_request_irqs(struct scsi_qla_host *ha)  { -	int ret; +	int ret = 0; +	int rval = QLA_ERROR;  	if (is_qla40XX(ha))  		goto try_intx; @@ -1495,15 +1580,13 @@ try_msi:  		}  	} -	/* -	 * Prevent interrupts from falling back to INTx mode in cases where -	 * interrupts cannot get acquired through MSI-X or MSI mode. 
-	 */ +try_intx:  	if (is_qla8022(ha)) { -		ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret); +		ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n", +			   __func__);  		goto irq_not_attached;  	} -try_intx: +  	/* Trying INTx */  	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,  	    IRQF_SHARED, DRIVER_NAME, ha); @@ -1523,9 +1606,10 @@ irq_attached:  	set_bit(AF_IRQ_ATTACHED, &ha->flags);  	ha->host->irq = ha->pdev->irq;  	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", -	    __func__, ha->pdev->irq); +		   __func__, ha->pdev->irq); +	rval = QLA_SUCCESS;  irq_not_attached: -	return ret; +	return rval;  }  void qla4xxx_free_irqs(struct scsi_qla_host *ha) diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 62d4208af21..0a3312c6dd6 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -212,9 +212,8 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,  			    ha->host_no, __func__));  			goto mbox_exit;  		} -		DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...," -			      " Scheduling Adapter Reset\n", ha->host_no, -			      mbx_cmd[0])); +		ql4_printk(KERN_WARNING, ha, "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n", +			   ha->host_no, mbx_cmd[0]);  		ha->mailbox_timeout_count++;  		mbx_sts[0] = (-1);  		set_bit(DPC_RESET_HA, &ha->dpc_flags); @@ -251,15 +250,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,  		break;  	case MBOX_STS_BUSY: -		DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n", -			       ha->host_no, __func__, mbx_cmd[0])); +		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: Cmd = %08X, ISP BUSY\n", +			   ha->host_no, __func__, mbx_cmd[0]);  		ha->mailbox_timeout_count++;  		break;  	default: -		DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, " -			      "sts = %08X ****\n", ha->host_no, __func__, -			      mbx_cmd[0], mbx_sts[0])); +		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n", +			   ha->host_no, __func__, mbx_cmd[0], mbx_sts[0], +			   mbx_sts[1], mbx_sts[2], mbx_sts[3], mbx_sts[4], +			   mbx_sts[5], mbx_sts[6], mbx_sts[7]);  		break;  	}  	spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -383,7 +383,6 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,  	mbox_cmd[2] = LSDW(init_fw_cb_dma);  	mbox_cmd[3] = MSDW(init_fw_cb_dma);  	mbox_cmd[4] = sizeof(struct addr_ctrl_blk); -	mbox_cmd[5] = (IFCB_VER_MAX << 8) | IFCB_VER_MIN;  	if (qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts) !=  	    QLA_SUCCESS) { @@ -418,6 +417,38 @@ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,  	return QLA_SUCCESS;  } +uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state) +{ +	uint8_t ipaddr_state; + +	switch (fw_ipaddr_state) { +	case IP_ADDRSTATE_UNCONFIGURED: +		ipaddr_state = ISCSI_IPDDRESS_STATE_UNCONFIGURED; +		break; +	case IP_ADDRSTATE_INVALID: +		ipaddr_state = ISCSI_IPDDRESS_STATE_INVALID; +		break; +	case IP_ADDRSTATE_ACQUIRING: +		ipaddr_state = ISCSI_IPDDRESS_STATE_ACQUIRING; +		break; +	case IP_ADDRSTATE_TENTATIVE: +		ipaddr_state = ISCSI_IPDDRESS_STATE_TENTATIVE; +		break; +	case IP_ADDRSTATE_DEPRICATED: +		ipaddr_state = ISCSI_IPDDRESS_STATE_DEPRECATED; +		break; +	case IP_ADDRSTATE_PREFERRED: +		ipaddr_state = ISCSI_IPDDRESS_STATE_VALID; +		break; +	case IP_ADDRSTATE_DISABLING: +		ipaddr_state = ISCSI_IPDDRESS_STATE_DISABLING; +		break; +	default: +		ipaddr_state = 
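/*
 * [Editorial aside] qla4xxx_set_ipaddr_state() above is a pure
 * firmware-to-transport translation. Assuming the IP_ADDRSTATE_* values
 * are small and zero-based (an assumption; verify against ql4_fw.h), the
 * same mapping could be a lookup table; a sketch only, the patch keeps
 * the switch:
 */
static const uint8_t qla4xxx_ipaddr_state_map[] = {
	[IP_ADDRSTATE_UNCONFIGURED]	= ISCSI_IPDDRESS_STATE_UNCONFIGURED,
	[IP_ADDRSTATE_INVALID]		= ISCSI_IPDDRESS_STATE_INVALID,
	[IP_ADDRSTATE_ACQUIRING]	= ISCSI_IPDDRESS_STATE_ACQUIRING,
	[IP_ADDRSTATE_TENTATIVE]	= ISCSI_IPDDRESS_STATE_TENTATIVE,
	[IP_ADDRSTATE_DEPRICATED]	= ISCSI_IPDDRESS_STATE_DEPRECATED,
	[IP_ADDRSTATE_PREFERRED]	= ISCSI_IPDDRESS_STATE_VALID,
	[IP_ADDRSTATE_DISABLING]	= ISCSI_IPDDRESS_STATE_DISABLING,
};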
ISCSI_IPDDRESS_STATE_UNCONFIGURED; +	} +	return ipaddr_state; +} +  static void  qla4xxx_update_local_ip(struct scsi_qla_host *ha,  			struct addr_ctrl_blk *init_fw_cb) @@ -425,7 +456,7 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,  	ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);  	ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);  	ha->ip_config.ipv4_addr_state = -				le16_to_cpu(init_fw_cb->ipv4_addr_state); +			qla4xxx_set_ipaddr_state(init_fw_cb->ipv4_addr_state);  	ha->ip_config.eth_mtu_size =  				le16_to_cpu(init_fw_cb->eth_mtu_size);  	ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port); @@ -434,6 +465,8 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,  		ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);  		ha->ip_config.ipv6_addl_options =  				le16_to_cpu(init_fw_cb->ipv6_addtl_opts); +		ha->ip_config.ipv6_tcp_options = +				le16_to_cpu(init_fw_cb->ipv6_tcp_opts);  	}  	/* Save IPv4 Address Info */ @@ -448,17 +481,65 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,  		   sizeof(init_fw_cb->ipv4_gw_addr)));  	ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag); +	ha->ip_config.control = init_fw_cb->control; +	ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf; +	ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos; +	ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid; +	ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len; +	memcpy(ha->ip_config.ipv4_alt_cid, init_fw_cb->ipv4_dhcp_alt_cid, +	       min(sizeof(ha->ip_config.ipv4_alt_cid), +		   sizeof(init_fw_cb->ipv4_dhcp_alt_cid))); +	ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len; +	memcpy(ha->ip_config.ipv4_vid, init_fw_cb->ipv4_dhcp_vid, +	       min(sizeof(ha->ip_config.ipv4_vid), +		   sizeof(init_fw_cb->ipv4_dhcp_vid))); +	ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl; +	ha->ip_config.def_timeout = le16_to_cpu(init_fw_cb->def_timeout); +	ha->ip_config.abort_timer = init_fw_cb->abort_timer; +	ha->ip_config.iscsi_options = le16_to_cpu(init_fw_cb->iscsi_opts); +	ha->ip_config.iscsi_max_pdu_size = +				le16_to_cpu(init_fw_cb->iscsi_max_pdu_size); +	ha->ip_config.iscsi_first_burst_len = +				le16_to_cpu(init_fw_cb->iscsi_fburst_len); +	ha->ip_config.iscsi_max_outstnd_r2t = +				le16_to_cpu(init_fw_cb->iscsi_max_outstnd_r2t); +	ha->ip_config.iscsi_max_burst_len = +				le16_to_cpu(init_fw_cb->iscsi_max_burst_len); +	memcpy(ha->ip_config.iscsi_name, init_fw_cb->iscsi_name, +	       min(sizeof(ha->ip_config.iscsi_name), +		   sizeof(init_fw_cb->iscsi_name)));  	if (is_ipv6_enabled(ha)) {  		/* Save IPv6 Address */  		ha->ip_config.ipv6_link_local_state = -			le16_to_cpu(init_fw_cb->ipv6_lnk_lcl_addr_state); +		  qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_lnk_lcl_addr_state);  		ha->ip_config.ipv6_addr0_state = -				le16_to_cpu(init_fw_cb->ipv6_addr0_state); +			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr0_state);  		ha->ip_config.ipv6_addr1_state = -				le16_to_cpu(init_fw_cb->ipv6_addr1_state); -		ha->ip_config.ipv6_default_router_state = -				le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state); +			qla4xxx_set_ipaddr_state(init_fw_cb->ipv6_addr1_state); + +		switch (le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state)) { +		case IPV6_RTRSTATE_UNKNOWN: +			ha->ip_config.ipv6_default_router_state = +						ISCSI_ROUTER_STATE_UNKNOWN; +			break; +		case IPV6_RTRSTATE_MANUAL: +			ha->ip_config.ipv6_default_router_state = +						ISCSI_ROUTER_STATE_MANUAL; +			break; +		case IPV6_RTRSTATE_ADVERTISED: +			
ha->ip_config.ipv6_default_router_state = +						ISCSI_ROUTER_STATE_ADVERTISED; +			break; +		case IPV6_RTRSTATE_STALE: +			ha->ip_config.ipv6_default_router_state = +						ISCSI_ROUTER_STATE_STALE; +			break; +		default: +			ha->ip_config.ipv6_default_router_state = +						ISCSI_ROUTER_STATE_UNKNOWN; +		} +  		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;  		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80; @@ -479,6 +560,23 @@ qla4xxx_update_local_ip(struct scsi_qla_host *ha,  		ha->ip_config.ipv6_vlan_tag =  				be16_to_cpu(init_fw_cb->ipv6_vlan_tag);  		ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port); +		ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id; +		ha->ip_config.ipv6_flow_lbl = +				le16_to_cpu(init_fw_cb->ipv6_flow_lbl); +		ha->ip_config.ipv6_traffic_class = +				init_fw_cb->ipv6_traffic_class; +		ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit; +		ha->ip_config.ipv6_nd_reach_time = +				le32_to_cpu(init_fw_cb->ipv6_nd_reach_time); +		ha->ip_config.ipv6_nd_rexmit_timer = +				le32_to_cpu(init_fw_cb->ipv6_nd_rexmit_timer); +		ha->ip_config.ipv6_nd_stale_timeout = +				le32_to_cpu(init_fw_cb->ipv6_nd_stale_timeout); +		ha->ip_config.ipv6_dup_addr_detect_count = +					init_fw_cb->ipv6_dup_addr_detect_count; +		ha->ip_config.ipv6_gw_advrt_mtu = +				le32_to_cpu(init_fw_cb->ipv6_gw_advrt_mtu); +		ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf;  	}  } @@ -549,9 +647,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)  		goto exit_init_fw_cb;  	} -	/* Initialize request and response queues. */ -	qla4xxx_init_rings(ha); -  	/* Fill in the request and response queue information. */  	init_fw_cb->rqq_consumer_idx = cpu_to_le16(ha->request_out);  	init_fw_cb->compq_producer_idx = cpu_to_le16(ha->response_in); @@ -903,6 +998,10 @@ int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,  				  "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "  				  "failed sts %04X %04X", __func__,  				  mbox_sts[0], mbox_sts[1])); +		if ((mbox_sts[0] == MBOX_STS_COMMAND_ERROR) && +		    (mbox_sts[1] == DDB_NOT_LOGGED_IN)) { +			set_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags); +		}  	}  	return status; @@ -1530,13 +1629,26 @@ exit_get_chap:  	return ret;  } -static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, -			    char *password, uint16_t idx, int bidi) +/** + * qla4xxx_set_chap - Make a chap entry at the given index + * @ha: pointer to adapter structure + * @username: CHAP username to set + * @password: CHAP password to set + * @idx: CHAP index at which to make the entry + * @bidi: type of chap entry (chap_in or chap_out) + * + * Create chap entry at the given index with the information provided. + * + * Note: Caller should acquire the chap lock before getting here. 
+ **/ +int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password, +		     uint16_t idx, int bidi)  {  	int ret = 0;  	int rval = QLA_ERROR;  	uint32_t offset = 0;  	struct ql4_chap_table *chap_table; +	uint32_t chap_size = 0;  	dma_addr_t chap_dma;  	chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); @@ -1554,7 +1666,20 @@ static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,  	strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);  	strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);  	chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); -	offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table)); + +	if (is_qla40XX(ha)) { +		chap_size = MAX_CHAP_ENTRIES_40XX * sizeof(*chap_table); +		offset = FLASH_CHAP_OFFSET; +	} else { /* Single region contains CHAP info for both ports which is +		  * divided into half for each port. +		  */ +		chap_size = ha->hw.flt_chap_size / 2; +		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); +		if (ha->port_num == 1) +			offset += chap_size; +	} + +	offset += (idx * sizeof(struct ql4_chap_table));  	rval = qla4xxx_set_flash(ha, chap_dma, offset,  				sizeof(struct ql4_chap_table),  				FLASH_OPT_RMW_COMMIT); @@ -1611,7 +1736,7 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,  		goto exit_unlock_uni_chap;  	} -	if (!(chap_table->flags & BIT_6)) { +	if (!(chap_table->flags & BIT_7)) {  		ql4_printk(KERN_ERR, ha, "Unidirectional entry not set\n");  		rval = QLA_ERROR;  		goto exit_unlock_uni_chap; @@ -1793,6 +1918,7 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha)  				  mbox_sts[0], mbox_sts[1], mbox_sts[2]));  	} else {  		if (is_qla8042(ha) && +		    test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) &&  		    (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {  			/*  			 * Disable ACB mailbox command takes time to complete @@ -2255,7 +2381,7 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)  			ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",  				   __func__);  			rval = QLA_ERROR; -			goto exit_config_acb; +			goto exit_free_acb;  		}  		memcpy(ha->saved_acb, acb, acb_len);  		break; @@ -2269,8 +2395,6 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)  		}  		memcpy(acb, ha->saved_acb, acb_len); -		kfree(ha->saved_acb); -		ha->saved_acb = NULL;  		rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);  		if (rval != QLA_SUCCESS) @@ -2286,8 +2410,55 @@ exit_free_acb:  	dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,  			  acb_dma);  exit_config_acb: +	if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) { +		kfree(ha->saved_acb); +		ha->saved_acb = NULL; +	}  	DEBUG2(ql4_printk(KERN_INFO, ha,  			  "%s %s\n", __func__,  			  rval == QLA_SUCCESS ? 
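/*
 * [Editorial example] The flash-offset logic added to qla4xxx_set_chap()
 * above reduces to one expression: ISP40xx parts keep CHAP at a fixed
 * offset, while 8xxx parts share one flash region between both ports,
 * split in half. A hypothetical helper capturing the same arithmetic
 * (the helper name is illustrative, not part of this patch):
 */
static uint32_t qla4xxx_chap_flash_offset(struct scsi_qla_host *ha,
					  uint16_t idx)
{
	uint32_t offset;

	if (is_qla40XX(ha)) {
		offset = FLASH_CHAP_OFFSET;
	} else {
		offset = FLASH_RAW_ACCESS_ADDR +
			 (ha->hw.flt_region_chap << 2);
		if (ha->port_num == 1)
			offset += ha->hw.flt_chap_size / 2;
	}
	return offset + idx * sizeof(struct ql4_chap_table);
}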
"SUCCEEDED" : "FAILED"));  	return rval;  } + +int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config) +{ +	uint32_t mbox_cmd[MBOX_REG_COUNT]; +	uint32_t mbox_sts[MBOX_REG_COUNT]; +	int status; + +	memset(&mbox_cmd, 0, sizeof(mbox_cmd)); +	memset(&mbox_sts, 0, sizeof(mbox_sts)); + +	mbox_cmd[0] = MBOX_CMD_GET_PORT_CONFIG; + +	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, +					 mbox_cmd, mbox_sts); +	if (status == QLA_SUCCESS) +		*config = mbox_sts[1]; +	else +		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, +			   mbox_sts[0]); + +	return status; +} + +int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config) +{ +	uint32_t mbox_cmd[MBOX_REG_COUNT]; +	uint32_t mbox_sts[MBOX_REG_COUNT]; +	int status; + +	memset(&mbox_cmd, 0, sizeof(mbox_cmd)); +	memset(&mbox_sts, 0, sizeof(mbox_sts)); + +	mbox_cmd[0] = MBOX_CMD_SET_PORT_CONFIG; +	mbox_cmd[1] = *config; + +	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT, +				mbox_cmd, mbox_sts); +	if (status != QLA_SUCCESS) +		ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__, +			   mbox_sts[0]); + +	return status; +} diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index d001202d356..9dbdb4be2d8 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -14,6 +14,7 @@  #include <asm-generic/io-64-nonatomic-lo-hi.h> +#define TIMEOUT_100_MS	100  #define MASK(n)		DMA_BIT_MASK(n)  #define MN_WIN(addr)	(((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))  #define OCM_WIN(addr)	(((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) @@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)  	return 0;  } +/** + * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory + * @ha: Pointer to adapter structure + * @addr: Flash address to write to + * @data: Data to be written + * @count: word_count to be written + * + * Return: On success return QLA_SUCCESS + *         On error return QLA_ERROR + **/ +int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr, +				uint32_t *data, uint32_t count) +{ +	int i, j; +	uint32_t agt_ctrl; +	unsigned long flags; +	int ret_val = QLA_SUCCESS; + +	/* Only 128-bit aligned access */ +	if (addr & 0xF) { +		ret_val = QLA_ERROR; +		goto exit_ms_mem_write; +	} + +	write_lock_irqsave(&ha->hw_lock, flags); + +	/* Write address */ +	ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0); +	if (ret_val == QLA_ERROR) { +		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n", +			   __func__); +		goto exit_ms_mem_write_unlock; +	} + +	for (i = 0; i < count; i++, addr += 16) { +		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET, +					     QLA8XXX_ADDR_QDR_NET_MAX)) || +		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET, +					     QLA8XXX_ADDR_DDR_NET_MAX)))) { +			ret_val = QLA_ERROR; +			goto exit_ms_mem_write_unlock; +		} + +		ret_val = ha->isp_ops->wr_reg_indirect(ha, +						       MD_MIU_TEST_AGT_ADDR_LO, +						       addr); +		/* Write data */ +		ret_val |= ha->isp_ops->wr_reg_indirect(ha, +						MD_MIU_TEST_AGT_WRDATA_LO, +						*data++); +		ret_val |= ha->isp_ops->wr_reg_indirect(ha, +						MD_MIU_TEST_AGT_WRDATA_HI, +						*data++); +		ret_val |= ha->isp_ops->wr_reg_indirect(ha, +						MD_MIU_TEST_AGT_WRDATA_ULO, +						*data++); +		ret_val |= ha->isp_ops->wr_reg_indirect(ha, +						MD_MIU_TEST_AGT_WRDATA_UHI, +						*data++); +		if (ret_val == QLA_ERROR) { +			ql4_printk(KERN_ERR, ha, "%s: 
write to AGT_WRDATA failed\n", +				   __func__); +			goto exit_ms_mem_write_unlock; +		} + +		/* Check write status */ +		ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, +						       MIU_TA_CTL_WRITE_ENABLE); +		ret_val |= ha->isp_ops->wr_reg_indirect(ha, +							MD_MIU_TEST_AGT_CTRL, +							MIU_TA_CTL_WRITE_START); +		if (ret_val == QLA_ERROR) { +			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n", +				   __func__); +			goto exit_ms_mem_write_unlock; +		} + +		for (j = 0; j < MAX_CTL_CHECK; j++) { +			ret_val = ha->isp_ops->rd_reg_indirect(ha, +							MD_MIU_TEST_AGT_CTRL, +							&agt_ctrl); +			if (ret_val == QLA_ERROR) { +				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", +					   __func__); +				goto exit_ms_mem_write_unlock; +			} +			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0) +				break; +		} + +		/* Status check failed */ +		if (j >= MAX_CTL_CHECK) { +			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n", +					   __func__); +			ret_val = QLA_ERROR; +			goto exit_ms_mem_write_unlock; +		} +	} + +exit_ms_mem_write_unlock: +	write_unlock_irqrestore(&ha->hw_lock, flags); + +exit_ms_mem_write: +	return ret_val; +} +  static int  qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)  { @@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)  	qla4_82xx_rom_unlock(ha);  } +static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha, +					     uint32_t addr1, uint32_t mask) +{ +	unsigned long timeout; +	uint32_t rval = QLA_SUCCESS; +	uint32_t temp; + +	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); +	do { +		ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); +		if ((temp & mask) != 0) +			break; + +		if (time_after_eq(jiffies, timeout)) { +			ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n"); +			return QLA_ERROR; +		} +	} while (1); + +	return rval; +} + +uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1, +				uint32_t addr3, uint32_t mask, uint32_t addr, +				uint32_t *data_ptr) +{ +	int rval = QLA_SUCCESS; +	uint32_t temp; +	uint32_t data; + +	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); +	if (rval) +		goto exit_ipmdio_rd_reg; + +	temp = (0x40000000 | addr); +	ha->isp_ops->wr_reg_indirect(ha, addr1, temp); + +	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); +	if (rval) +		goto exit_ipmdio_rd_reg; + +	ha->isp_ops->rd_reg_indirect(ha, addr3, &data); +	*data_ptr = data; + +exit_ipmdio_rd_reg: +	return rval; +} + + +static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha, +						    uint32_t addr1, +						    uint32_t addr2, +						    uint32_t addr3, +						    uint32_t mask) +{ +	unsigned long timeout; +	uint32_t temp; +	uint32_t rval = QLA_SUCCESS; + +	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS); +	do { +		ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp); +		if ((temp & 0x1) != 1) +			break; +		if (time_after_eq(jiffies, timeout)) { +			ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n"); +			return QLA_ERROR; +		} +	} while (1); + +	return rval; +} + +static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha, +				  uint32_t addr1, uint32_t addr3, +				  uint32_t mask, uint32_t addr, +				  uint32_t value) +{ +	int rval = QLA_SUCCESS; + +	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); +	if (rval) +		goto exit_ipmdio_wr_reg; + +	ha->isp_ops->wr_reg_indirect(ha, addr3, value); +	ha->isp_ops->wr_reg_indirect(ha, addr1, addr); + +	rval = 
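/*
 * [Editorial sketch] qla4_8xxx_ms_mem_write_128b() above takes a 16-byte
 * aligned card address and a count of 128-bit words. The pex-dma hunk
 * below uses it to push a DMA descriptor into MS memory; the minimal call
 * shape, as a standalone illustration (the wrapper name is hypothetical):
 */
static int qla4_write_one_descriptor(struct scsi_qla_host *ha,
				     uint64_t card_addr,
				     struct qla4_83xx_pex_dma_descriptor *desc)
{
	/* count is in 16-byte words; card_addr must be 128-bit aligned
	 * or the helper bails out with QLA_ERROR */
	return qla4_8xxx_ms_mem_write_128b(ha, card_addr, (uint32_t *)desc,
					   sizeof(*desc) / 16);
}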
ql4_84xx_poll_wait_for_ready(ha, addr1, mask); +	if (rval) +		goto exit_ipmdio_wr_reg; + +exit_ipmdio_wr_reg: +	return rval; +} +  static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,  				struct qla8xxx_minidump_entry_hdr *entry_hdr,  				uint32_t **d_ptr) @@ -1822,7 +2024,7 @@ error_exit:  	return rval;  } -static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha, +static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,  				struct qla8xxx_minidump_entry_hdr *entry_hdr,  				uint32_t **d_ptr)  { @@ -1899,11 +2101,11 @@ static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,  		dma_desc.cmd.read_data_size = size;  		/* Prepare: Write pex-dma descriptor to MS memory. */ -		rval = qla4_83xx_ms_mem_write_128b(ha, +		rval = qla4_8xxx_ms_mem_write_128b(ha,  			      (uint64_t)m_hdr->desc_card_addr,  			      (uint32_t *)&dma_desc,  			      (sizeof(struct qla4_83xx_pex_dma_descriptor)/16)); -		if (rval == -1) { +		if (rval != QLA_SUCCESS) {  			ql4_printk(KERN_INFO, ha,  				   "%s: Error writing rdmem-dma-init to MS !!!\n",  				   __func__); @@ -2359,17 +2561,10 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,  	uint32_t *data_ptr = *d_ptr;  	int rval = QLA_SUCCESS; -	if (is_qla8032(ha) || is_qla8042(ha)) { -		rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr, -						       &data_ptr); -		if (rval != QLA_SUCCESS) { -			rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, -								  &data_ptr); -		} -	} else { +	rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr); +	if (rval != QLA_SUCCESS)  		rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,  							  &data_ptr); -	}  	*d_ptr = data_ptr;  	return rval;  } @@ -2383,6 +2578,11 @@ static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,  			  "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",  			  ha->host_no, index, entry_hdr->entry_type,  			  entry_hdr->d_ctrl.entry_capture_mask)); +	/* If the driver encounters a new entry type that it cannot process, +	 * it should just skip the entry and adjust the total buffer size by +	 * subtracting the skipped bytes from it +	 */ +	ha->fw_dump_skip_size += entry_hdr->entry_capture_size;  }  /* ISP83xx functions to process new minidump entries... 
*/ @@ -2435,6 +2635,227 @@ exit_process_pollrd:  	return rval;  } +static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha, +				struct qla8xxx_minidump_entry_hdr *entry_hdr, +				uint32_t **d_ptr) +{ +	int loop_cnt; +	uint32_t addr1, addr2, value, data, temp, wrval; +	uint8_t stride, stride2; +	uint16_t count; +	uint32_t poll, mask, data_size, modify_mask; +	uint32_t wait_count = 0; +	uint32_t *data_ptr = *d_ptr; +	struct qla8044_minidump_entry_rddfe *rddfe; +	uint32_t rval = QLA_SUCCESS; + +	rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; +	addr1 = le32_to_cpu(rddfe->addr_1); +	value = le32_to_cpu(rddfe->value); +	stride = le32_to_cpu(rddfe->stride); +	stride2 = le32_to_cpu(rddfe->stride2); +	count = le32_to_cpu(rddfe->count); + +	poll = le32_to_cpu(rddfe->poll); +	mask = le32_to_cpu(rddfe->mask); +	modify_mask = le32_to_cpu(rddfe->modify_mask); +	data_size = le32_to_cpu(rddfe->data_size); + +	addr2 = addr1 + stride; + +	for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) { +		ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value)); + +		wait_count = 0; +		while (wait_count < poll) { +			ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); +			if ((temp & mask) != 0) +				break; +			wait_count++; +		} + +		if (wait_count == poll) { +			ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); +			rval = QLA_ERROR; +			goto exit_process_rddfe; +		} else { +			ha->isp_ops->rd_reg_indirect(ha, addr2, &temp); +			temp = temp & modify_mask; +			temp = (temp | ((loop_cnt << 16) | loop_cnt)); +			wrval = ((temp << 16) | temp); + +			ha->isp_ops->wr_reg_indirect(ha, addr2, wrval); +			ha->isp_ops->wr_reg_indirect(ha, addr1, value); + +			wait_count = 0; +			while (wait_count < poll) { +				ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); +				if ((temp & mask) != 0) +					break; +				wait_count++; +			} +			if (wait_count == poll) { +				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", +					   __func__); +				rval = QLA_ERROR; +				goto exit_process_rddfe; +			} + +			ha->isp_ops->wr_reg_indirect(ha, addr1, +						     ((0x40000000 | value) + +						     stride2)); +			wait_count = 0; +			while (wait_count < poll) { +				ha->isp_ops->rd_reg_indirect(ha, addr1, &temp); +				if ((temp & mask) != 0) +					break; +				wait_count++; +			} + +			if (wait_count == poll) { +				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", +					   __func__); +				rval = QLA_ERROR; +				goto exit_process_rddfe; +			} + +			ha->isp_ops->rd_reg_indirect(ha, addr2, &data); + +			*data_ptr++ = cpu_to_le32(wrval); +			*data_ptr++ = cpu_to_le32(data); +		} +	} + +	*d_ptr = data_ptr; +exit_process_rddfe: +	return rval; +} + +static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha, +				struct qla8xxx_minidump_entry_hdr *entry_hdr, +				uint32_t **d_ptr) +{ +	int rval = QLA_SUCCESS; +	uint32_t addr1, addr2, value1, value2, data, selval; +	uint8_t stride1, stride2; +	uint32_t addr3, addr4, addr5, addr6, addr7; +	uint16_t count, loop_cnt; +	uint32_t poll, mask; +	uint32_t *data_ptr = *d_ptr; +	struct qla8044_minidump_entry_rdmdio *rdmdio; + +	rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; +	addr1 = le32_to_cpu(rdmdio->addr_1); +	addr2 = le32_to_cpu(rdmdio->addr_2); +	value1 = le32_to_cpu(rdmdio->value_1); +	stride1 = le32_to_cpu(rdmdio->stride_1); +	stride2 = le32_to_cpu(rdmdio->stride_2); +	count = le32_to_cpu(rdmdio->count); + +	poll = le32_to_cpu(rdmdio->poll); +	mask = le32_to_cpu(rdmdio->mask); +	value2 = le32_to_cpu(rdmdio->value_2); + +	addr3 = addr1 + stride1; + +	for (loop_cnt 
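/*
 * [Editorial aside] The rddfe handler above repeats the same bounded
 * poll-until-(reg & mask) loop three times. A refactoring sketch (not
 * part of the patch) that would collapse those copies into one helper:
 */
static uint32_t qla4_84xx_poll_reg_mask(struct scsi_qla_host *ha,
					uint32_t addr, uint32_t mask,
					uint32_t poll)
{
	uint32_t temp;
	uint32_t wait_count = 0;

	while (wait_count < poll) {
		ha->isp_ops->rd_reg_indirect(ha, addr, &temp);
		if ((temp & mask) != 0)
			return QLA_SUCCESS;
		wait_count++;
	}
	return QLA_ERROR;	/* caller logs the timeout */
}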
= 0; loop_cnt < count; loop_cnt++) { +		rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, +							 addr3, mask); +		if (rval) +			goto exit_process_rdmdio; + +		addr4 = addr2 - stride1; +		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4, +					     value2); +		if (rval) +			goto exit_process_rdmdio; + +		addr5 = addr2 - (2 * stride1); +		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5, +					     value1); +		if (rval) +			goto exit_process_rdmdio; + +		addr6 = addr2 - (3 * stride1); +		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, +					     addr6, 0x2); +		if (rval) +			goto exit_process_rdmdio; + +		rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, +							 addr3, mask); +		if (rval) +			goto exit_process_rdmdio; + +		addr7 = addr2 - (4 * stride1); +		rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, +						      mask, addr7, &data); +		if (rval) +			goto exit_process_rdmdio; + +		selval = (value2 << 18) | (value1 << 2) | 2; + +		stride2 = le32_to_cpu(rdmdio->stride_2); +		*data_ptr++ = cpu_to_le32(selval); +		*data_ptr++ = cpu_to_le32(data); + +		value1 = value1 + stride2; +		*d_ptr = data_ptr; +	} + +exit_process_rdmdio: +	return rval; +} + +static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha, +				struct qla8xxx_minidump_entry_hdr *entry_hdr, +				uint32_t **d_ptr) +{ +	uint32_t addr1, addr2, value1, value2, poll, mask, r_value; +	struct qla8044_minidump_entry_pollwr *pollwr_hdr; +	uint32_t wait_count = 0; +	uint32_t rval = QLA_SUCCESS; + +	pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; +	addr1 = le32_to_cpu(pollwr_hdr->addr_1); +	addr2 = le32_to_cpu(pollwr_hdr->addr_2); +	value1 = le32_to_cpu(pollwr_hdr->value_1); +	value2 = le32_to_cpu(pollwr_hdr->value_2); + +	poll = le32_to_cpu(pollwr_hdr->poll); +	mask = le32_to_cpu(pollwr_hdr->mask); + +	while (wait_count < poll) { +		ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + +		if ((r_value & poll) != 0) +			break; + +		wait_count++; +	} + +	if (wait_count == poll) { +		ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__); +		rval = QLA_ERROR; +		goto exit_process_pollwr; +	} + +	ha->isp_ops->wr_reg_indirect(ha, addr2, value2); +	ha->isp_ops->wr_reg_indirect(ha, addr1, value1); + +	wait_count = 0; +	while (wait_count < poll) { +		ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value); + +		if ((r_value & poll) != 0) +			break; +		wait_count++; +	} + +exit_process_pollwr: +	return rval; +} +  static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,  				struct qla8xxx_minidump_entry_hdr *entry_hdr,  				uint32_t **d_ptr) @@ -2590,6 +3011,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)  	uint64_t now;  	uint32_t timestamp; +	ha->fw_dump_skip_size = 0;  	if (!ha->fw_dump) {  		ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",  			   __func__, ha->host_no); @@ -2747,6 +3169,24 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)  			if (rval != QLA_SUCCESS)  				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);  			break; +		case QLA8044_RDDFE: +			rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr, +								&data_ptr); +			if (rval != QLA_SUCCESS) +				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); +			break; +		case QLA8044_RDMDIO: +			rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr, +								 &data_ptr); +			if (rval != QLA_SUCCESS) +				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); +			break; +		case QLA8044_POLLWR: +			rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr, +								 
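/*
 * [Editorial aside] The three new ISP8044 minidump opcodes wired up here
 * (QLA8044_RDDFE, QLA8044_RDMDIO, QLA8044_POLLWR) all share one call
 * shape. If the switch keeps growing, a table-driven dispatch is the
 * usual alternative; a sketch only, the patch itself keeps the switch:
 */
typedef uint32_t (*qla4_md_entry_fn)(struct scsi_qla_host *,
				     struct qla8xxx_minidump_entry_hdr *,
				     uint32_t **);

static const struct {
	uint32_t entry_type;
	qla4_md_entry_fn handler;
} qla4_84xx_md_handlers[] = {
	{ QLA8044_RDDFE,  qla4_84xx_minidump_process_rddfe  },
	{ QLA8044_RDMDIO, qla4_84xx_minidump_process_rdmdio },
	{ QLA8044_POLLWR, qla4_84xx_minidump_process_pollwr },
};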
&data_ptr); +			if (rval != QLA_SUCCESS) +				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); +			break;  		case QLA8XXX_RDNOP:  		default:  			qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); @@ -2761,7 +3201,7 @@ skip_nxt_entry:  				 entry_hdr->entry_size);  	} -	if (data_collected != ha->fw_dump_size) { +	if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {  		ql4_printk(KERN_INFO, ha,  			   "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",  			   data_collected, ha->fw_dump_size); @@ -2820,63 +3260,35 @@ void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)  int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)  {  	int rval = QLA_ERROR; -	int i, timeout; -	uint32_t old_count, count, idc_ctrl; -	int need_reset = 0, peg_stuck = 1; +	int i; +	uint32_t old_count, count; +	int need_reset = 0;  	need_reset = ha->isp_ops->need_reset(ha); -	old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); - -	for (i = 0; i < 10; i++) { -		timeout = msleep_interruptible(200); -		if (timeout) { -			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, -					    QLA8XXX_DEV_FAILED); -			return rval; -		} - -		count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); -		if (count != old_count) -			peg_stuck = 0; -	}  	if (need_reset) {  		/* We are trying to perform a recovery here. */ -		if (peg_stuck) +		if (test_bit(AF_FW_RECOVERY, &ha->flags))  			ha->isp_ops->rom_lock_recovery(ha); -		goto dev_initialize;  	} else  { -		/* Start of day for this ha context. */ -		if (peg_stuck) { -			/* Either we are the first or recovery in progress. */ -			ha->isp_ops->rom_lock_recovery(ha); -			goto dev_initialize; -		} else { -			/* Firmware already running. */ -			rval = QLA_SUCCESS; -			goto dev_ready; +		old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER); +		for (i = 0; i < 10; i++) { +			msleep(200); +			count = qla4_8xxx_rd_direct(ha, +						    QLA8XXX_PEG_ALIVE_COUNTER); +			if (count != old_count) { +				rval = QLA_SUCCESS; +				goto dev_ready; +			}  		} +		ha->isp_ops->rom_lock_recovery(ha);  	} -dev_initialize:  	/* set to DEV_INITIALIZING */  	ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");  	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,  			    QLA8XXX_DEV_INITIALIZING); -	/* -	 * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, -	 * reset it after device goes to INIT state. -	 */ -	if (is_qla8032(ha) || is_qla8042(ha)) { -		idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL); -		if (idc_ctrl & GRACEFUL_RESET_BIT1) { -			qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, -					 (idc_ctrl & ~GRACEFUL_RESET_BIT1)); -			set_bit(AF_83XX_NO_FW_DUMP, &ha->flags); -		} -	} -  	ha->isp_ops->idc_unlock(ha);  	if (is_qla8022(ha)) @@ -3209,6 +3621,10 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)  	retval = qla4_8xxx_device_state_handler(ha); +	/* Initialize request and response queues. 
*/ +	if (retval == QLA_SUCCESS) +		qla4xxx_init_rings(ha); +  	if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))  		retval = qla4xxx_request_irqs(ha); @@ -3836,3 +4252,24 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha)  msix_out:  	return ret;  } + +int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha) +{ +	int status = QLA_SUCCESS; + +	/* Don't retry adapter initialization if IRQ allocation failed */ +	if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) { +		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n", +			   __func__); +		status = QLA_ERROR; +		goto exit_init_adapter_failure; +	} + +	/* Since interrupts are registered in start_firmware for +	 * 8xxx, release them here if initialize_adapter fails, +	 * so that adapter initialization can be retried */ +	qla4xxx_free_irqs(ha); + +exit_init_adapter_failure: +	return status; +} diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index 14500a0f62c..337d9fcf641 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -858,6 +858,9 @@ struct crb_addr_pair {  #define QLA83XX_POLLRD	35  #define QLA83XX_RDMUX2	36  #define QLA83XX_POLLRDMWR  37 +#define QLA8044_RDDFE	38 +#define QLA8044_RDMDIO	39 +#define QLA8044_POLLWR	40  #define QLA8XXX_RDROM	71  #define QLA8XXX_RDMEM	72  #define QLA8XXX_CNTRL	98 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index f8a0a26a3cd..32020637620 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo,  		" Target Session Recovery Timeout.\n"  		"\t\t  Default: 120 sec."); -int ql4xmdcapmask = 0x1F; +int ql4xmdcapmask = 0;  module_param(ql4xmdcapmask, int, S_IRUGO);  MODULE_PARM_DESC(ql4xmdcapmask,  		 " Set the Minidump driver capture mask level.\n" -		 "\t\t  Default is 0x1F.\n" -		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F" +		 "\t\t  Default is 0 (firmware default capture mask)\n" +		 "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");  int ql4xenablemd = 1;  module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR); @@ -149,6 +149,9 @@ static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,  static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,  				 uint32_t *num_entries, char *buf);  static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx); +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void  *data, +				  int len); +static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len);  /*   * SCSI host template entry points @@ -252,6 +255,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {  	.send_ping		= qla4xxx_send_ping,  	.get_chap		= qla4xxx_get_chap_list,  	.delete_chap		= qla4xxx_delete_chap, +	.set_chap		= qla4xxx_set_chap_entry,  	.get_flashnode_param	= qla4xxx_sysfs_ddb_get_param,  	.set_flashnode_param	= qla4xxx_sysfs_ddb_set_param,  	.new_flashnode		= qla4xxx_sysfs_ddb_add, @@ -259,6 +263,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {  	.login_flashnode	= qla4xxx_sysfs_ddb_login,  	.logout_flashnode	= qla4xxx_sysfs_ddb_logout,  	.logout_flashnode_sid	= qla4xxx_sysfs_ddb_logout_sid, +	.get_host_stats		= qla4xxx_get_host_stats,  };  static struct scsi_transport_template *qla4xxx_scsi_transport; @@ -416,6 +421,7 @@ static umode_t qla4_attr_is_visible(int param_type, int param)  		case ISCSI_PARAM_EXP_STATSN:  		case ISCSI_PARAM_DISCOVERY_PARENT_IDX:  
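/*
 * [Editorial note] qla4_attr_is_visible() is the hook the iSCSI transport
 * layer calls once per parameter before creating sysfs attributes:
 * returning S_IRUGO exposes the attribute read-only, returning 0 hides
 * it. Assuming the driver wires this function into the transport's
 * .attr_is_visible slot (standard for this driver), the contract is:
 */
static umode_t qla4_visibility_example(void)
{
	/* visible read-only per the case tables in this function */
	return qla4xxx_iscsi_transport.attr_is_visible(ISCSI_NET_PARAM,
						       ISCSI_NET_PARAM_MTU);
}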
case ISCSI_PARAM_DISCOVERY_PARENT_TYPE: +		case ISCSI_PARAM_LOCAL_IPADDR:  			return S_IRUGO;  		default:  			return 0; @@ -437,6 +443,65 @@ static umode_t qla4_attr_is_visible(int param_type, int param)  		case ISCSI_NET_PARAM_VLAN_ENABLED:  		case ISCSI_NET_PARAM_MTU:  		case ISCSI_NET_PARAM_PORT: +		case ISCSI_NET_PARAM_IPADDR_STATE: +		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: +		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: +		case ISCSI_NET_PARAM_DELAYED_ACK_EN: +		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: +		case ISCSI_NET_PARAM_TCP_WSF_DISABLE: +		case ISCSI_NET_PARAM_TCP_WSF: +		case ISCSI_NET_PARAM_TCP_TIMER_SCALE: +		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: +		case ISCSI_NET_PARAM_CACHE_ID: +		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: +		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: +		case ISCSI_NET_PARAM_IPV4_TOS_EN: +		case ISCSI_NET_PARAM_IPV4_TOS: +		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: +		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: +		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: +		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: +		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: +		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: +		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: +		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: +		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: +		case ISCSI_NET_PARAM_REDIRECT_EN: +		case ISCSI_NET_PARAM_IPV4_TTL: +		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: +		case ISCSI_NET_PARAM_IPV6_MLD_EN: +		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: +		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: +		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: +		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: +		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: +		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: +		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: +		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: +			return S_IRUGO; +		default: +			return 0; +		} +	case ISCSI_IFACE_PARAM: +		switch (param) { +		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: +		case ISCSI_IFACE_PARAM_HDRDGST_EN: +		case ISCSI_IFACE_PARAM_DATADGST_EN: +		case ISCSI_IFACE_PARAM_IMM_DATA_EN: +		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: +		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: +		case ISCSI_IFACE_PARAM_PDU_INORDER_EN: +		case ISCSI_IFACE_PARAM_ERL: +		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: +		case ISCSI_IFACE_PARAM_FIRST_BURST: +		case ISCSI_IFACE_PARAM_MAX_R2T: +		case ISCSI_IFACE_PARAM_MAX_BURST: +		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: +		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: +		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: +		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: +		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: +		case ISCSI_IFACE_PARAM_INITIATOR_NAME:  			return S_IRUGO;  		default:  			return 0; @@ -508,6 +573,154 @@ static umode_t qla4_attr_is_visible(int param_type, int param)  	return 0;  } +/** + * qla4xxx_create_chap_list - Create CHAP list from FLASH + * @ha: pointer to adapter structure + * + * Read flash and build a list of CHAP entries. During login, when a CHAP + * entry is received, it is checked against this list. If the entry exists, + * the CHAP entry index is set in the DDB. If it does not exist, a new entry + * is added to the CHAP table in FLASH and the index obtained is + * used in the DDB. 
+ **/ +static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) +{ +	int rval = 0; +	uint8_t *chap_flash_data = NULL; +	uint32_t offset; +	dma_addr_t chap_dma; +	uint32_t chap_size = 0; + +	if (is_qla40XX(ha)) +		chap_size = MAX_CHAP_ENTRIES_40XX * +			    sizeof(struct ql4_chap_table); +	else	/* Single region contains CHAP info for both +		 * ports which is divided into half for each port. +		 */ +		chap_size = ha->hw.flt_chap_size / 2; + +	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, +					     &chap_dma, GFP_KERNEL); +	if (!chap_flash_data) { +		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); +		return; +	} + +	if (is_qla40XX(ha)) { +		offset = FLASH_CHAP_OFFSET; +	} else { +		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); +		if (ha->port_num == 1) +			offset += chap_size; +	} + +	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); +	if (rval != QLA_SUCCESS) +		goto exit_chap_list; + +	if (ha->chap_list == NULL) +		ha->chap_list = vmalloc(chap_size); +	if (ha->chap_list == NULL) { +		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); +		goto exit_chap_list; +	} + +	memset(ha->chap_list, 0, chap_size); +	memcpy(ha->chap_list, chap_flash_data, chap_size); + +exit_chap_list: +	dma_free_coherent(&ha->pdev->dev, chap_size, chap_flash_data, chap_dma); +} + +static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha, +				     int16_t chap_index, +				     struct ql4_chap_table **chap_entry) +{ +	int rval = QLA_ERROR; +	int max_chap_entries; + +	if (!ha->chap_list) { +		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); +		rval = QLA_ERROR; +		goto exit_get_chap; +	} + +	if (is_qla80XX(ha)) +		max_chap_entries = (ha->hw.flt_chap_size / 2) / +				   sizeof(struct ql4_chap_table); +	else +		max_chap_entries = MAX_CHAP_ENTRIES_40XX; + +	if (chap_index > max_chap_entries) { +		ql4_printk(KERN_ERR, ha, "Invalid Chap index\n"); +		rval = QLA_ERROR; +		goto exit_get_chap; +	} + +	*chap_entry = (struct ql4_chap_table *)ha->chap_list + chap_index; +	if ((*chap_entry)->cookie != +	     __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { +		rval = QLA_ERROR; +		*chap_entry = NULL; +	} else { +		rval = QLA_SUCCESS; +	} + +exit_get_chap: +	return rval; +} + +/** + * qla4xxx_find_free_chap_index - Find the first free chap index + * @ha: pointer to adapter structure + * @chap_index: CHAP index to be returned + * + * Find the first free chap index available in the chap table + * + * Note: Caller should acquire the chap lock before getting here. 
+ **/ +static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha, +					uint16_t *chap_index) +{ +	int i, rval; +	int free_index = -1; +	int max_chap_entries = 0; +	struct ql4_chap_table *chap_table; + +	if (is_qla80XX(ha)) +		max_chap_entries = (ha->hw.flt_chap_size / 2) / +						sizeof(struct ql4_chap_table); +	else +		max_chap_entries = MAX_CHAP_ENTRIES_40XX; + +	if (!ha->chap_list) { +		ql4_printk(KERN_ERR, ha, "CHAP table cache is empty!\n"); +		rval = QLA_ERROR; +		goto exit_find_chap; +	} + +	for (i = 0; i < max_chap_entries; i++) { +		chap_table = (struct ql4_chap_table *)ha->chap_list + i; + +		if ((chap_table->cookie != +		    __constant_cpu_to_le16(CHAP_VALID_COOKIE)) && +		   (i > MAX_RESRV_CHAP_IDX)) { +				free_index = i; +				break; +		} +	} + +	if (free_index != -1) { +		*chap_index = free_index; +		rval = QLA_SUCCESS; +	} else { +		rval = QLA_ERROR; +	} + +exit_find_chap: +	return rval; +} +  static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,  				  uint32_t *num_entries, char *buf)  { @@ -532,6 +745,8 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,  		goto exit_get_chap_list;  	} +	qla4xxx_create_chap_list(ha); +  	chap_rec = (struct iscsi_chap_rec *) buf;  	mutex_lock(&ha->chap_sem);  	for (i = chap_tbl_idx; i < max_chap_entries; i++) { @@ -691,113 +906,754 @@ exit_delete_chap:  	return ret;  } +/** + * qla4xxx_set_chap_entry - Make chap entry with given information + * @shost: pointer to host + * @data: chap info - credentials, index and type to make chap entry + * @len: length of data + * + * Add or update chap entry with the given information + **/ +static int qla4xxx_set_chap_entry(struct Scsi_Host *shost, void *data, int len) +{ +	struct scsi_qla_host *ha = to_qla_host(shost); +	struct iscsi_chap_rec chap_rec; +	struct ql4_chap_table *chap_entry = NULL; +	struct iscsi_param_info *param_info; +	struct nlattr *attr; +	int max_chap_entries = 0; +	int type; +	int rem = len; +	int rc = 0; +	int size; + +	memset(&chap_rec, 0, sizeof(chap_rec)); + +	nla_for_each_attr(attr, data, len, rem) { +		param_info = nla_data(attr); + +		switch (param_info->param) { +		case ISCSI_CHAP_PARAM_INDEX: +			chap_rec.chap_tbl_idx = *(uint16_t *)param_info->value; +			break; +		case ISCSI_CHAP_PARAM_CHAP_TYPE: +			chap_rec.chap_type = param_info->value[0]; +			break; +		case ISCSI_CHAP_PARAM_USERNAME: +			size = min_t(size_t, sizeof(chap_rec.username), +				     param_info->len); +			memcpy(chap_rec.username, param_info->value, size); +			break; +		case ISCSI_CHAP_PARAM_PASSWORD: +			size = min_t(size_t, sizeof(chap_rec.password), +				     param_info->len); +			memcpy(chap_rec.password, param_info->value, size); +			break; +		case ISCSI_CHAP_PARAM_PASSWORD_LEN: +			chap_rec.password_length = param_info->value[0]; +			break; +		default: +			ql4_printk(KERN_ERR, ha, +				   "%s: No such sysfs attribute\n", __func__); +			rc = -ENOSYS; +			goto exit_set_chap; +		}; +	} + +	if (chap_rec.chap_type == CHAP_TYPE_IN) +		type = BIDI_CHAP; +	else +		type = LOCAL_CHAP; + +	if (is_qla80XX(ha)) +		max_chap_entries = (ha->hw.flt_chap_size / 2) / +				   sizeof(struct ql4_chap_table); +	else +		max_chap_entries = MAX_CHAP_ENTRIES_40XX; + +	mutex_lock(&ha->chap_sem); +	if (chap_rec.chap_tbl_idx < max_chap_entries) { +		rc = qla4xxx_get_chap_by_index(ha, chap_rec.chap_tbl_idx, +					       &chap_entry); +		if (!rc) { +			if (!(type == qla4xxx_get_chap_type(chap_entry))) { +				ql4_printk(KERN_INFO, ha, +					   "Type mismatch for CHAP 
entry %d\n", +					   chap_rec.chap_tbl_idx); +				rc = -EINVAL; +				goto exit_unlock_chap; +			} + +			/* If chap index is in use then don't modify it */ +			rc = qla4xxx_is_chap_active(shost, +						    chap_rec.chap_tbl_idx); +			if (rc) { +				ql4_printk(KERN_INFO, ha, +					   "CHAP entry %d is in use\n", +					   chap_rec.chap_tbl_idx); +				rc = -EBUSY; +				goto exit_unlock_chap; +			} +		} +	} else { +		rc = qla4xxx_find_free_chap_index(ha, &chap_rec.chap_tbl_idx); +		if (rc) { +			ql4_printk(KERN_INFO, ha, "CHAP entry not available\n"); +			rc = -EBUSY; +			goto exit_unlock_chap; +		} +	} + +	rc = qla4xxx_set_chap(ha, chap_rec.username, chap_rec.password, +			      chap_rec.chap_tbl_idx, type); + +exit_unlock_chap: +	mutex_unlock(&ha->chap_sem); + +exit_set_chap: +	return rc; +} + + +static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len) +{ +	struct scsi_qla_host *ha = to_qla_host(shost); +	struct iscsi_offload_host_stats *host_stats = NULL; +	int host_stats_size; +	int ret = 0; +	int ddb_idx = 0; +	struct ql_iscsi_stats *ql_iscsi_stats = NULL; +	int stats_size; +	dma_addr_t iscsi_stats_dma; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "Func: %s\n", __func__)); + +	host_stats_size = sizeof(struct iscsi_offload_host_stats); + +	if (host_stats_size != len) { +		ql4_printk(KERN_INFO, ha, "%s: host_stats size mismatch expected = %d, is = %d\n", +			   __func__, len, host_stats_size); +		ret = -EINVAL; +		goto exit_host_stats; +	} +	host_stats = (struct iscsi_offload_host_stats *)buf; + +	if (!buf) { +		ret = -ENOMEM; +		goto exit_host_stats; +	} + +	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats)); + +	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, +					    &iscsi_stats_dma, GFP_KERNEL); +	if (!ql_iscsi_stats) { +		ql4_printk(KERN_ERR, ha, +			   "Unable to allocate memory for iscsi stats\n"); +		goto exit_host_stats; +	} + +	ret =  qla4xxx_get_mgmt_data(ha, ddb_idx, stats_size, +				     iscsi_stats_dma); +	if (ret != QLA_SUCCESS) { +		ql4_printk(KERN_ERR, ha, +			   "Unable to retrieve iscsi stats\n"); +		goto exit_host_stats; +	} +	host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames); +	host_stats->mactx_bytes = le64_to_cpu(ql_iscsi_stats->mac_tx_bytes); +	host_stats->mactx_multicast_frames = +			le64_to_cpu(ql_iscsi_stats->mac_tx_multicast_frames); +	host_stats->mactx_broadcast_frames = +			le64_to_cpu(ql_iscsi_stats->mac_tx_broadcast_frames); +	host_stats->mactx_pause_frames = +			le64_to_cpu(ql_iscsi_stats->mac_tx_pause_frames); +	host_stats->mactx_control_frames = +			le64_to_cpu(ql_iscsi_stats->mac_tx_control_frames); +	host_stats->mactx_deferral = +			le64_to_cpu(ql_iscsi_stats->mac_tx_deferral); +	host_stats->mactx_excess_deferral = +			le64_to_cpu(ql_iscsi_stats->mac_tx_excess_deferral); +	host_stats->mactx_late_collision = +			le64_to_cpu(ql_iscsi_stats->mac_tx_late_collision); +	host_stats->mactx_abort	= le64_to_cpu(ql_iscsi_stats->mac_tx_abort); +	host_stats->mactx_single_collision = +			le64_to_cpu(ql_iscsi_stats->mac_tx_single_collision); +	host_stats->mactx_multiple_collision = +			le64_to_cpu(ql_iscsi_stats->mac_tx_multiple_collision); +	host_stats->mactx_collision = +			le64_to_cpu(ql_iscsi_stats->mac_tx_collision); +	host_stats->mactx_frames_dropped = +			le64_to_cpu(ql_iscsi_stats->mac_tx_frames_dropped); +	host_stats->mactx_jumbo_frames = +			le64_to_cpu(ql_iscsi_stats->mac_tx_jumbo_frames); +	host_stats->macrx_frames = le64_to_cpu(ql_iscsi_stats->mac_rx_frames); +	host_stats->macrx_bytes = 
le64_to_cpu(ql_iscsi_stats->mac_rx_bytes); +	host_stats->macrx_unknown_control_frames = +		le64_to_cpu(ql_iscsi_stats->mac_rx_unknown_control_frames); +	host_stats->macrx_pause_frames = +			le64_to_cpu(ql_iscsi_stats->mac_rx_pause_frames); +	host_stats->macrx_control_frames = +			le64_to_cpu(ql_iscsi_stats->mac_rx_control_frames); +	host_stats->macrx_dribble = +			le64_to_cpu(ql_iscsi_stats->mac_rx_dribble); +	host_stats->macrx_frame_length_error = +			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_length_error); +	host_stats->macrx_jabber = le64_to_cpu(ql_iscsi_stats->mac_rx_jabber); +	host_stats->macrx_carrier_sense_error = +		le64_to_cpu(ql_iscsi_stats->mac_rx_carrier_sense_error); +	host_stats->macrx_frame_discarded = +			le64_to_cpu(ql_iscsi_stats->mac_rx_frame_discarded); +	host_stats->macrx_frames_dropped = +			le64_to_cpu(ql_iscsi_stats->mac_rx_frames_dropped); +	host_stats->mac_crc_error = le64_to_cpu(ql_iscsi_stats->mac_crc_error); +	host_stats->mac_encoding_error = +			le64_to_cpu(ql_iscsi_stats->mac_encoding_error); +	host_stats->macrx_length_error_large = +			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_large); +	host_stats->macrx_length_error_small = +			le64_to_cpu(ql_iscsi_stats->mac_rx_length_error_small); +	host_stats->macrx_multicast_frames = +			le64_to_cpu(ql_iscsi_stats->mac_rx_multicast_frames); +	host_stats->macrx_broadcast_frames = +			le64_to_cpu(ql_iscsi_stats->mac_rx_broadcast_frames); +	host_stats->iptx_packets = le64_to_cpu(ql_iscsi_stats->ip_tx_packets); +	host_stats->iptx_bytes = le64_to_cpu(ql_iscsi_stats->ip_tx_bytes); +	host_stats->iptx_fragments = +			le64_to_cpu(ql_iscsi_stats->ip_tx_fragments); +	host_stats->iprx_packets = le64_to_cpu(ql_iscsi_stats->ip_rx_packets); +	host_stats->iprx_bytes = le64_to_cpu(ql_iscsi_stats->ip_rx_bytes); +	host_stats->iprx_fragments = +			le64_to_cpu(ql_iscsi_stats->ip_rx_fragments); +	host_stats->ip_datagram_reassembly = +			le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly); +	host_stats->ip_invalid_address_error = +			le64_to_cpu(ql_iscsi_stats->ip_invalid_address_error); +	host_stats->ip_error_packets = +			le64_to_cpu(ql_iscsi_stats->ip_error_packets); +	host_stats->ip_fragrx_overlap = +			le64_to_cpu(ql_iscsi_stats->ip_fragrx_overlap); +	host_stats->ip_fragrx_outoforder = +			le64_to_cpu(ql_iscsi_stats->ip_fragrx_outoforder); +	host_stats->ip_datagram_reassembly_timeout = +		le64_to_cpu(ql_iscsi_stats->ip_datagram_reassembly_timeout); +	host_stats->ipv6tx_packets = +			le64_to_cpu(ql_iscsi_stats->ipv6_tx_packets); +	host_stats->ipv6tx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_tx_bytes); +	host_stats->ipv6tx_fragments = +			le64_to_cpu(ql_iscsi_stats->ipv6_tx_fragments); +	host_stats->ipv6rx_packets = +			le64_to_cpu(ql_iscsi_stats->ipv6_rx_packets); +	host_stats->ipv6rx_bytes = le64_to_cpu(ql_iscsi_stats->ipv6_rx_bytes); +	host_stats->ipv6rx_fragments = +			le64_to_cpu(ql_iscsi_stats->ipv6_rx_fragments); +	host_stats->ipv6_datagram_reassembly = +			le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly); +	host_stats->ipv6_invalid_address_error = +		le64_to_cpu(ql_iscsi_stats->ipv6_invalid_address_error); +	host_stats->ipv6_error_packets = +			le64_to_cpu(ql_iscsi_stats->ipv6_error_packets); +	host_stats->ipv6_fragrx_overlap = +			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_overlap); +	host_stats->ipv6_fragrx_outoforder = +			le64_to_cpu(ql_iscsi_stats->ipv6_fragrx_outoforder); +	host_stats->ipv6_datagram_reassembly_timeout = +		le64_to_cpu(ql_iscsi_stats->ipv6_datagram_reassembly_timeout); +	host_stats->tcptx_segments = +			
le64_to_cpu(ql_iscsi_stats->tcp_tx_segments); +	host_stats->tcptx_bytes	= le64_to_cpu(ql_iscsi_stats->tcp_tx_bytes); +	host_stats->tcprx_segments = +			le64_to_cpu(ql_iscsi_stats->tcp_rx_segments); +	host_stats->tcprx_byte = le64_to_cpu(ql_iscsi_stats->tcp_rx_byte); +	host_stats->tcp_duplicate_ack_retx = +			le64_to_cpu(ql_iscsi_stats->tcp_duplicate_ack_retx); +	host_stats->tcp_retx_timer_expired = +			le64_to_cpu(ql_iscsi_stats->tcp_retx_timer_expired); +	host_stats->tcprx_duplicate_ack	= +			le64_to_cpu(ql_iscsi_stats->tcp_rx_duplicate_ack); +	host_stats->tcprx_pure_ackr = +			le64_to_cpu(ql_iscsi_stats->tcp_rx_pure_ackr); +	host_stats->tcptx_delayed_ack = +			le64_to_cpu(ql_iscsi_stats->tcp_tx_delayed_ack); +	host_stats->tcptx_pure_ack = +			le64_to_cpu(ql_iscsi_stats->tcp_tx_pure_ack); +	host_stats->tcprx_segment_error = +			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_error); +	host_stats->tcprx_segment_outoforder = +			le64_to_cpu(ql_iscsi_stats->tcp_rx_segment_outoforder); +	host_stats->tcprx_window_probe = +			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_probe); +	host_stats->tcprx_window_update = +			le64_to_cpu(ql_iscsi_stats->tcp_rx_window_update); +	host_stats->tcptx_window_probe_persist = +		le64_to_cpu(ql_iscsi_stats->tcp_tx_window_probe_persist); +	host_stats->ecc_error_correction = +			le64_to_cpu(ql_iscsi_stats->ecc_error_correction); +	host_stats->iscsi_pdu_tx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_tx); +	host_stats->iscsi_data_bytes_tx = +			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_tx); +	host_stats->iscsi_pdu_rx = le64_to_cpu(ql_iscsi_stats->iscsi_pdu_rx); +	host_stats->iscsi_data_bytes_rx	= +			le64_to_cpu(ql_iscsi_stats->iscsi_data_bytes_rx); +	host_stats->iscsi_io_completed = +			le64_to_cpu(ql_iscsi_stats->iscsi_io_completed); +	host_stats->iscsi_unexpected_io_rx = +			le64_to_cpu(ql_iscsi_stats->iscsi_unexpected_io_rx); +	host_stats->iscsi_format_error = +			le64_to_cpu(ql_iscsi_stats->iscsi_format_error); +	host_stats->iscsi_hdr_digest_error = +			le64_to_cpu(ql_iscsi_stats->iscsi_hdr_digest_error); +	host_stats->iscsi_data_digest_error = +			le64_to_cpu(ql_iscsi_stats->iscsi_data_digest_error); +	host_stats->iscsi_sequence_error = +			le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error); +exit_host_stats: +	if (ql_iscsi_stats) +		dma_free_coherent(&ha->pdev->dev, stats_size, +				  ql_iscsi_stats, iscsi_stats_dma); + +	ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n", +		   __func__); +	return ret; +} +  static int qla4xxx_get_iface_param(struct iscsi_iface *iface,  				   enum iscsi_param_type param_type,  				   int param, char *buf)  {  	struct Scsi_Host *shost = iscsi_iface_to_shost(iface);  	struct scsi_qla_host *ha = to_qla_host(shost); +	int ival; +	char *pval = NULL;  	int len = -ENOSYS; -	if (param_type != ISCSI_NET_PARAM) -		return -ENOSYS; +	if (param_type == ISCSI_NET_PARAM) { +		switch (param) { +		case ISCSI_NET_PARAM_IPV4_ADDR: +			len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); +			break; +		case ISCSI_NET_PARAM_IPV4_SUBNET: +			len = sprintf(buf, "%pI4\n", +				      &ha->ip_config.subnet_mask); +			break; +		case ISCSI_NET_PARAM_IPV4_GW: +			len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); +			break; +		case ISCSI_NET_PARAM_IFACE_ENABLE: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				OP_STATE(ha->ip_config.ipv4_options, +					 IPOPT_IPV4_PROTOCOL_ENABLE, pval); +			} else { +				OP_STATE(ha->ip_config.ipv6_options, +					 IPV6_OPT_IPV6_PROTOCOL_ENABLE, pval); +			} -	switch (param) { -	case 
ISCSI_NET_PARAM_IPV4_ADDR: -		len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); -		break; -	case ISCSI_NET_PARAM_IPV4_SUBNET: -		len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask); -		break; -	case ISCSI_NET_PARAM_IPV4_GW: -		len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); -		break; -	case ISCSI_NET_PARAM_IFACE_ENABLE: -		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) -			len = sprintf(buf, "%s\n", -				      (ha->ip_config.ipv4_options & -				       IPOPT_IPV4_PROTOCOL_ENABLE) ? -				      "enabled" : "disabled"); -		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_BOOTPROTO:  			len = sprintf(buf, "%s\n", -				      (ha->ip_config.ipv6_options & -				       IPV6_OPT_IPV6_PROTOCOL_ENABLE) ? -				       "enabled" : "disabled"); -		break; -	case ISCSI_NET_PARAM_IPV4_BOOTPROTO: -		len = sprintf(buf, "%s\n", -			      (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ? -			      "dhcp" : "static"); -		break; -	case ISCSI_NET_PARAM_IPV6_ADDR: -		if (iface->iface_num == 0) -			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0); -		if (iface->iface_num == 1) -			len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1); -		break; -	case ISCSI_NET_PARAM_IPV6_LINKLOCAL: -		len = sprintf(buf, "%pI6\n", -			      &ha->ip_config.ipv6_link_local_addr); -		break; -	case ISCSI_NET_PARAM_IPV6_ROUTER: -		len = sprintf(buf, "%pI6\n", -			      &ha->ip_config.ipv6_default_router_addr); -		break; -	case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: -		len = sprintf(buf, "%s\n", -			      (ha->ip_config.ipv6_addl_options & -			       IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? -			       "nd" : "static"); -		break; -	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: -		len = sprintf(buf, "%s\n", -			      (ha->ip_config.ipv6_addl_options & -			       IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? -			       "auto" : "static"); -		break; -	case ISCSI_NET_PARAM_VLAN_ID: -		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				      (ha->ip_config.tcp_options & +				       TCPOPT_DHCP_ENABLE) ? +				      "dhcp" : "static"); +			break; +		case ISCSI_NET_PARAM_IPV6_ADDR: +			if (iface->iface_num == 0) +				len = sprintf(buf, "%pI6\n", +					      &ha->ip_config.ipv6_addr0); +			if (iface->iface_num == 1) +				len = sprintf(buf, "%pI6\n", +					      &ha->ip_config.ipv6_addr1); +			break; +		case ISCSI_NET_PARAM_IPV6_LINKLOCAL: +			len = sprintf(buf, "%pI6\n", +				      &ha->ip_config.ipv6_link_local_addr); +			break; +		case ISCSI_NET_PARAM_IPV6_ROUTER: +			len = sprintf(buf, "%pI6\n", +				      &ha->ip_config.ipv6_default_router_addr); +			break; +		case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: +			pval = (ha->ip_config.ipv6_addl_options & +				IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? +				"nd" : "static"; + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: +			pval = (ha->ip_config.ipv6_addl_options & +				IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 
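/*
 * [Editorial note] OP_STATE(), used throughout the reworked
 * qla4xxx_get_iface_param(), is not defined in this patch context. Its
 * call sites (including the inverted ~options uses for the *_DISABLE
 * flags) imply an "enabled"/"disabled" flag test; an assumed equivalent,
 * to be verified against ql4_def.h:
 */
#define OP_STATE(o, f, p)	((p) = ((o) & (f)) ? "enabled" : "disabled")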
+				"auto" : "static"; + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_VLAN_ID: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				ival = ha->ip_config.ipv4_vlan_tag & +				       ISCSI_MAX_VLAN_ID; +			else +				ival = ha->ip_config.ipv6_vlan_tag & +				       ISCSI_MAX_VLAN_ID; + +			len = sprintf(buf, "%d\n", ival); +			break; +		case ISCSI_NET_PARAM_VLAN_PRIORITY: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				ival = (ha->ip_config.ipv4_vlan_tag >> 13) & +				       ISCSI_MAX_VLAN_PRIORITY; +			else +				ival = (ha->ip_config.ipv6_vlan_tag >> 13) & +				       ISCSI_MAX_VLAN_PRIORITY; + +			len = sprintf(buf, "%d\n", ival); +			break; +		case ISCSI_NET_PARAM_VLAN_ENABLED: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				OP_STATE(ha->ip_config.ipv4_options, +					 IPOPT_VLAN_TAGGING_ENABLE, pval); +			} else { +				OP_STATE(ha->ip_config.ipv6_options, +					 IPV6_OPT_VLAN_TAGGING_ENABLE, pval); +			} +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_MTU: +			len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); +			break; +		case ISCSI_NET_PARAM_PORT: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				len = sprintf(buf, "%d\n", +					      ha->ip_config.ipv4_port); +			else +				len = sprintf(buf, "%d\n", +					      ha->ip_config.ipv6_port); +			break; +		case ISCSI_NET_PARAM_IPADDR_STATE: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				pval = iscsi_get_ipaddress_state_name( +						ha->ip_config.ipv4_addr_state); +			} else { +				if (iface->iface_num == 0) +					pval = iscsi_get_ipaddress_state_name( +						ha->ip_config.ipv6_addr0_state); +				else if (iface->iface_num == 1) +					pval = iscsi_get_ipaddress_state_name( +						ha->ip_config.ipv6_addr1_state); +			} + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE: +			pval = iscsi_get_ipaddress_state_name( +					ha->ip_config.ipv6_link_local_state); +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV6_ROUTER_STATE: +			pval = iscsi_get_router_state_name( +				      ha->ip_config.ipv6_default_router_state); +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_DELAYED_ACK_EN: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				OP_STATE(~ha->ip_config.tcp_options, +					 TCPOPT_DELAYED_ACK_DISABLE, pval); +			} else { +				OP_STATE(~ha->ip_config.ipv6_tcp_options, +					 IPV6_TCPOPT_DELAYED_ACK_DISABLE, pval); +			} +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				OP_STATE(~ha->ip_config.tcp_options, +					 TCPOPT_NAGLE_ALGO_DISABLE, pval); +			} else { +				OP_STATE(~ha->ip_config.ipv6_tcp_options, +					 IPV6_TCPOPT_NAGLE_ALGO_DISABLE, pval); +			} +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_TCP_WSF_DISABLE: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				OP_STATE(~ha->ip_config.tcp_options, +					 TCPOPT_WINDOW_SCALE_DISABLE, pval); +			} else { +				OP_STATE(~ha->ip_config.ipv6_tcp_options, +					 IPV6_TCPOPT_WINDOW_SCALE_DISABLE, +					 pval); +			} +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_TCP_WSF: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				len = sprintf(buf, "%d\n", +					      ha->ip_config.tcp_wsf); +			else +				len = sprintf(buf, "%d\n", +					      ha->ip_config.ipv6_tcp_wsf); +			break; +		case ISCSI_NET_PARAM_TCP_TIMER_SCALE: +			if 
(iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				ival = (ha->ip_config.tcp_options & +					TCPOPT_TIMER_SCALE) >> 1; +			else +				ival = (ha->ip_config.ipv6_tcp_options & +					IPV6_TCPOPT_TIMER_SCALE) >> 1; + +			len = sprintf(buf, "%d\n", ival); +			break; +		case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				OP_STATE(ha->ip_config.tcp_options, +					 TCPOPT_TIMESTAMP_ENABLE, pval); +			} else { +				OP_STATE(ha->ip_config.ipv6_tcp_options, +					 IPV6_TCPOPT_TIMESTAMP_EN, pval); +			} +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_CACHE_ID: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				len = sprintf(buf, "%d\n", +					      ha->ip_config.ipv4_cache_id); +			else +				len = sprintf(buf, "%d\n", +					      ha->ip_config.ipv6_cache_id); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: +			OP_STATE(ha->ip_config.tcp_options, +				 TCPOPT_DNS_SERVER_IP_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: +			OP_STATE(ha->ip_config.tcp_options, +				 TCPOPT_SLP_DA_INFO_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_TOS_EN: +			OP_STATE(ha->ip_config.ipv4_options, +				 IPOPT_IPV4_TOS_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_TOS: +			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_tos); +			break; +		case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: +			OP_STATE(ha->ip_config.ipv4_options, +				 IPOPT_GRAT_ARP_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: +			OP_STATE(ha->ip_config.ipv4_options, IPOPT_ALT_CID_EN, +				 pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: +			pval = (ha->ip_config.ipv4_alt_cid_len) ? +			       (char *)ha->ip_config.ipv4_alt_cid : ""; + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: +			OP_STATE(ha->ip_config.ipv4_options, +				 IPOPT_REQ_VID_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: +			OP_STATE(ha->ip_config.ipv4_options, +				 IPOPT_USE_VID_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: +			pval = (ha->ip_config.ipv4_vid_len) ? 
+			       (char *)ha->ip_config.ipv4_vid : ""; + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: +			OP_STATE(ha->ip_config.ipv4_options, +				 IPOPT_LEARN_IQN_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: +			OP_STATE(~ha->ip_config.ipv4_options, +				 IPOPT_FRAGMENTATION_DISABLE, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: +			OP_STATE(ha->ip_config.ipv4_options, +				 IPOPT_IN_FORWARD_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_REDIRECT_EN: +			if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { +				OP_STATE(ha->ip_config.ipv4_options, +					 IPOPT_ARP_REDIRECT_EN, pval); +			} else { +				OP_STATE(ha->ip_config.ipv6_options, +					 IPV6_OPT_REDIRECT_EN, pval); +			} +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV4_TTL: +			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_ttl); +			break; +		case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: +			OP_STATE(ha->ip_config.ipv6_options, +				 IPV6_OPT_GRAT_NEIGHBOR_ADV_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV6_MLD_EN: +			OP_STATE(ha->ip_config.ipv6_addl_options, +				 IPV6_ADDOPT_MLD_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: +			len = sprintf(buf, "%u\n", ha->ip_config.ipv6_flow_lbl); +			break; +		case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS:  			len = sprintf(buf, "%d\n", -				      (ha->ip_config.ipv4_vlan_tag & -				       ISCSI_MAX_VLAN_ID)); -		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) +				      ha->ip_config.ipv6_traffic_class); +			break; +		case ISCSI_NET_PARAM_IPV6_HOP_LIMIT:  			len = sprintf(buf, "%d\n", -				      (ha->ip_config.ipv6_vlan_tag & -				       ISCSI_MAX_VLAN_ID)); -		break; -	case ISCSI_NET_PARAM_VLAN_PRIORITY: -		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) +				      ha->ip_config.ipv6_hop_limit); +			break; +		case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO:  			len = sprintf(buf, "%d\n", -				      ((ha->ip_config.ipv4_vlan_tag >> 13) & -					ISCSI_MAX_VLAN_PRIORITY)); -		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) +				      ha->ip_config.ipv6_nd_reach_time); +			break; +		case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME:  			len = sprintf(buf, "%d\n", -				      ((ha->ip_config.ipv6_vlan_tag >> 13) & -					ISCSI_MAX_VLAN_PRIORITY)); -		break; -	case ISCSI_NET_PARAM_VLAN_ENABLED: -		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) -			len = sprintf(buf, "%s\n", -				      (ha->ip_config.ipv4_options & -				       IPOPT_VLAN_TAGGING_ENABLE) ? -				       "enabled" : "disabled"); -		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) -			len = sprintf(buf, "%s\n", -				      (ha->ip_config.ipv6_options & -				       IPV6_OPT_VLAN_TAGGING_ENABLE) ? 
-				       "enabled" : "disabled"); -		break; -	case ISCSI_NET_PARAM_MTU: -		len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); -		break; -	case ISCSI_NET_PARAM_PORT: -		if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) -			len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port); -		else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) -			len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port); -		break; -	default: -		len = -ENOSYS; +				      ha->ip_config.ipv6_nd_rexmit_timer); +			break; +		case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: +			len = sprintf(buf, "%d\n", +				      ha->ip_config.ipv6_nd_stale_timeout); +			break; +		case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: +			len = sprintf(buf, "%d\n", +				      ha->ip_config.ipv6_dup_addr_detect_count); +			break; +		case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: +			len = sprintf(buf, "%d\n", +				      ha->ip_config.ipv6_gw_advrt_mtu); +			break; +		default: +			len = -ENOSYS; +		} +	} else if (param_type == ISCSI_IFACE_PARAM) { +		switch (param) { +		case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: +			len = sprintf(buf, "%d\n", ha->ip_config.def_timeout); +			break; +		case ISCSI_IFACE_PARAM_HDRDGST_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_HEADER_DIGEST_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_DATADGST_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_DATA_DIGEST_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_IMM_DATA_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_IMMEDIATE_DATA_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_INITIAL_R2T_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_DATA_SEQ_INORDER_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_PDU_INORDER_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_DATA_PDU_INORDER_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_ERL: +			len = sprintf(buf, "%d\n", +				      (ha->ip_config.iscsi_options & +				       ISCSIOPTS_ERL)); +			break; +		case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: +			len = sprintf(buf, "%u\n", +				      ha->ip_config.iscsi_max_pdu_size * +				      BYTE_UNITS); +			break; +		case ISCSI_IFACE_PARAM_FIRST_BURST: +			len = sprintf(buf, "%u\n", +				      ha->ip_config.iscsi_first_burst_len * +				      BYTE_UNITS); +			break; +		case ISCSI_IFACE_PARAM_MAX_R2T: +			len = sprintf(buf, "%d\n", +				      ha->ip_config.iscsi_max_outstnd_r2t); +			break; +		case ISCSI_IFACE_PARAM_MAX_BURST: +			len = sprintf(buf, "%u\n", +				      ha->ip_config.iscsi_max_burst_len * +				      BYTE_UNITS); +			break; +		case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_CHAP_AUTH_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_BIDI_CHAP_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_DISCOVERY_AUTH_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 
ISCSIOPTS_DISCOVERY_LOGOUT_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: +			OP_STATE(ha->ip_config.iscsi_options, +				 ISCSIOPTS_STRICT_LOGIN_COMP_EN, pval); + +			len = sprintf(buf, "%s\n", pval); +			break; +		case ISCSI_IFACE_PARAM_INITIATOR_NAME: +			len = sprintf(buf, "%s\n", ha->ip_config.iscsi_name); +			break; +		default: +			len = -ENOSYS; +		}  	}  	return len; @@ -814,16 +1670,13 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,  	struct sockaddr_in *addr;  	struct sockaddr_in6 *addr6; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));  	if (!shost) {  		ret = -ENXIO; -		printk(KERN_ERR "%s: shost is NULL\n", -		       __func__); +		pr_err("%s: shost is NULL\n", __func__);  		return ERR_PTR(ret);  	}  	ha = iscsi_host_priv(shost); -  	ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));  	if (!ep) {  		ret = -ENOMEM; @@ -843,6 +1696,9 @@ qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,  		addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;  		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,  				  (char *)&addr6->sin6_addr)); +	} else { +		ql4_printk(KERN_WARNING, ha, "%s: Invalid endpoint\n", +			   __func__);  	}  	qla_ep->host = shost; @@ -856,9 +1712,9 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)  	struct scsi_qla_host *ha;  	int ret = 0; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));  	qla_ep = ep->dd_data;  	ha = to_qla_host(qla_ep->host); +	DEBUG2(pr_info_ratelimited("%s: host: %ld\n", __func__, ha->host_no));  	if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))  		ret = 1; @@ -868,7 +1724,13 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)  static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)  { -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); +	struct qla_endpoint *qla_ep; +	struct scsi_qla_host *ha; + +	qla_ep = ep->dd_data; +	ha = to_qla_host(qla_ep->host); +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, +			  ha->host_no));  	iscsi_destroy_endpoint(ep);  } @@ -878,15 +1740,18 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,  {  	struct qla_endpoint *qla_ep = ep->dd_data;  	struct sockaddr *dst_addr; +	struct scsi_qla_host *ha; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); +	if (!qla_ep) +		return -ENOTCONN; + +	ha = to_qla_host(qla_ep->host); +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, +			  ha->host_no));  	switch (param) {  	case ISCSI_PARAM_CONN_PORT:  	case ISCSI_PARAM_CONN_ADDRESS: -		if (!qla_ep) -			return -ENOTCONN; -  		dst_addr = (struct sockaddr *)&qla_ep->dst_addr;  		if (!dst_addr)  			return -ENOTCONN; @@ -910,13 +1775,13 @@ static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,  	int ret;  	dma_addr_t iscsi_stats_dma; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); -  	cls_sess = iscsi_conn_to_session(cls_conn);  	sess = cls_sess->dd_data;  	ddb_entry = sess->dd_data;  	ha = ddb_entry->ha; +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, +			  ha->host_no));  	stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));  	/* Allocate memory */  	ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size, @@ -1169,8 +2034,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,  				cpu_to_le16(  				  IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);  		else -			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " -				   "IPv6 addr\n"); +			ql4_printk(KERN_ERR, ha, 
+				   "Invalid autocfg setting for IPv6 addr\n");  		break;  	case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:  		/* Autocfg applies to even interface */ @@ -1186,8 +2051,8 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,  			init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(  				       ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);  		else -			ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " -				   "IPv6 linklocal addr\n"); +			ql4_printk(KERN_ERR, ha, +				   "Invalid autocfg setting for IPv6 linklocal addr\n");  		break;  	case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:  		/* Autocfg applies to even interface */ @@ -1236,6 +2101,136 @@ static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,  		init_fw_cb->ipv6_port =  				cpu_to_le16(*(uint16_t *)iface_param->value);  		break; +	case ISCSI_NET_PARAM_DELAYED_ACK_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) +			init_fw_cb->ipv6_tcp_opts |= +				cpu_to_le16(IPV6_TCPOPT_DELAYED_ACK_DISABLE); +		else +			init_fw_cb->ipv6_tcp_opts &= +				cpu_to_le16(~IPV6_TCPOPT_DELAYED_ACK_DISABLE & +					    0xFFFF); +		break; +	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) +			init_fw_cb->ipv6_tcp_opts |= +				cpu_to_le16(IPV6_TCPOPT_NAGLE_ALGO_DISABLE); +		else +			init_fw_cb->ipv6_tcp_opts &= +				cpu_to_le16(~IPV6_TCPOPT_NAGLE_ALGO_DISABLE); +		break; +	case ISCSI_NET_PARAM_TCP_WSF_DISABLE: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) +			init_fw_cb->ipv6_tcp_opts |= +				cpu_to_le16(IPV6_TCPOPT_WINDOW_SCALE_DISABLE); +		else +			init_fw_cb->ipv6_tcp_opts &= +				cpu_to_le16(~IPV6_TCPOPT_WINDOW_SCALE_DISABLE); +		break; +	case ISCSI_NET_PARAM_TCP_WSF: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; +		break; +	case ISCSI_NET_PARAM_TCP_TIMER_SCALE: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_tcp_opts &= +					cpu_to_le16(~IPV6_TCPOPT_TIMER_SCALE); +		init_fw_cb->ipv6_tcp_opts |= +				cpu_to_le16((iface_param->value[0] << 1) & +					    IPV6_TCPOPT_TIMER_SCALE); +		break; +	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv6_tcp_opts |= +				cpu_to_le16(IPV6_TCPOPT_TIMESTAMP_EN); +		else +			init_fw_cb->ipv6_tcp_opts &= +				cpu_to_le16(~IPV6_TCPOPT_TIMESTAMP_EN); +		break; +	case ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv6_opts |= +				cpu_to_le16(IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); +		else +			init_fw_cb->ipv6_opts &= +				cpu_to_le16(~IPV6_OPT_GRAT_NEIGHBOR_ADV_EN); +		break; +	case ISCSI_NET_PARAM_REDIRECT_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv6_opts |= +				cpu_to_le16(IPV6_OPT_REDIRECT_EN); +		else +			init_fw_cb->ipv6_opts &= +				cpu_to_le16(~IPV6_OPT_REDIRECT_EN); +		break; +	case ISCSI_NET_PARAM_IPV6_MLD_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv6_addtl_opts |= +				cpu_to_le16(IPV6_ADDOPT_MLD_EN); +		else +			init_fw_cb->ipv6_addtl_opts &= +				cpu_to_le16(~IPV6_ADDOPT_MLD_EN); +		break; +	case ISCSI_NET_PARAM_IPV6_FLOW_LABEL: +		if 
(iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_flow_lbl = +				cpu_to_le16(*(uint16_t *)iface_param->value); +		break; +	case ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_traffic_class = iface_param->value[0]; +		break; +	case ISCSI_NET_PARAM_IPV6_HOP_LIMIT: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_hop_limit = iface_param->value[0]; +		break; +	case ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_nd_reach_time = +				cpu_to_le32(*(uint32_t *)iface_param->value); +		break; +	case ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_nd_rexmit_timer = +				cpu_to_le32(*(uint32_t *)iface_param->value); +		break; +	case ISCSI_NET_PARAM_IPV6_ND_STALE_TMO: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_nd_stale_timeout = +				cpu_to_le32(*(uint32_t *)iface_param->value); +		break; +	case ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; +		break; +	case ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv6_gw_advrt_mtu = +				cpu_to_le32(*(uint32_t *)iface_param->value); +		break;  	default:  		ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",  			   iface_param->param); @@ -1304,6 +2299,196 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,  		init_fw_cb->ipv4_port =  				cpu_to_le16(*(uint16_t *)iface_param->value);  		break; +	case ISCSI_NET_PARAM_DELAYED_ACK_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) +			init_fw_cb->ipv4_tcp_opts |= +				cpu_to_le16(TCPOPT_DELAYED_ACK_DISABLE); +		else +			init_fw_cb->ipv4_tcp_opts &= +				cpu_to_le16(~TCPOPT_DELAYED_ACK_DISABLE & +					    0xFFFF); +		break; +	case ISCSI_NET_PARAM_TCP_NAGLE_DISABLE: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) +			init_fw_cb->ipv4_tcp_opts |= +				cpu_to_le16(TCPOPT_NAGLE_ALGO_DISABLE); +		else +			init_fw_cb->ipv4_tcp_opts &= +				cpu_to_le16(~TCPOPT_NAGLE_ALGO_DISABLE); +		break; +	case ISCSI_NET_PARAM_TCP_WSF_DISABLE: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) +			init_fw_cb->ipv4_tcp_opts |= +				cpu_to_le16(TCPOPT_WINDOW_SCALE_DISABLE); +		else +			init_fw_cb->ipv4_tcp_opts &= +				cpu_to_le16(~TCPOPT_WINDOW_SCALE_DISABLE); +		break; +	case ISCSI_NET_PARAM_TCP_WSF: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; +		break; +	case ISCSI_NET_PARAM_TCP_TIMER_SCALE: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv4_tcp_opts &= cpu_to_le16(~TCPOPT_TIMER_SCALE); +		init_fw_cb->ipv4_tcp_opts |= +				cpu_to_le16((iface_param->value[0] << 1) & +					    TCPOPT_TIMER_SCALE); +		break; +	case ISCSI_NET_PARAM_TCP_TIMESTAMP_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_tcp_opts |= +				cpu_to_le16(TCPOPT_TIMESTAMP_ENABLE); +		else +			init_fw_cb->ipv4_tcp_opts &= +				cpu_to_le16(~TCPOPT_TIMESTAMP_ENABLE); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_tcp_opts |= +				
cpu_to_le16(TCPOPT_DNS_SERVER_IP_EN); +		else +			init_fw_cb->ipv4_tcp_opts &= +				cpu_to_le16(~TCPOPT_DNS_SERVER_IP_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_tcp_opts |= +				cpu_to_le16(TCPOPT_SLP_DA_INFO_EN); +		else +			init_fw_cb->ipv4_tcp_opts &= +				cpu_to_le16(~TCPOPT_SLP_DA_INFO_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_TOS_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +				cpu_to_le16(IPOPT_IPV4_TOS_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +				cpu_to_le16(~IPOPT_IPV4_TOS_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_TOS: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv4_tos = iface_param->value[0]; +		break; +	case ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +					cpu_to_le16(IPOPT_GRAT_ARP_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +					cpu_to_le16(~IPOPT_GRAT_ARP_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +				cpu_to_le16(IPOPT_ALT_CID_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +				cpu_to_le16(~IPOPT_ALT_CID_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID: +		if (iface_param->iface_num & 0x1) +			break; +		memcpy(init_fw_cb->ipv4_dhcp_alt_cid, iface_param->value, +		       (sizeof(init_fw_cb->ipv4_dhcp_alt_cid) - 1)); +		init_fw_cb->ipv4_dhcp_alt_cid_len = +					strlen(init_fw_cb->ipv4_dhcp_alt_cid); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +					cpu_to_le16(IPOPT_REQ_VID_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +					cpu_to_le16(~IPOPT_REQ_VID_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +					cpu_to_le16(IPOPT_USE_VID_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +					cpu_to_le16(~IPOPT_USE_VID_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID: +		if (iface_param->iface_num & 0x1) +			break; +		memcpy(init_fw_cb->ipv4_dhcp_vid, iface_param->value, +		       (sizeof(init_fw_cb->ipv4_dhcp_vid) - 1)); +		init_fw_cb->ipv4_dhcp_vid_len = +					strlen(init_fw_cb->ipv4_dhcp_vid); +		break; +	case ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +					cpu_to_le16(IPOPT_LEARN_IQN_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +					cpu_to_le16(~IPOPT_LEARN_IQN_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_DISABLE) +			init_fw_cb->ipv4_ip_opts |= +				cpu_to_le16(IPOPT_FRAGMENTATION_DISABLE); +		else +			init_fw_cb->ipv4_ip_opts &= +				cpu_to_le16(~IPOPT_FRAGMENTATION_DISABLE); +		break; +	case ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +				
cpu_to_le16(IPOPT_IN_FORWARD_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +				cpu_to_le16(~IPOPT_IN_FORWARD_EN); +		break; +	case ISCSI_NET_PARAM_REDIRECT_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->ipv4_ip_opts |= +				cpu_to_le16(IPOPT_ARP_REDIRECT_EN); +		else +			init_fw_cb->ipv4_ip_opts &= +				cpu_to_le16(~IPOPT_ARP_REDIRECT_EN); +		break; +	case ISCSI_NET_PARAM_IPV4_TTL: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->ipv4_ttl = iface_param->value[0]; +		break;  	default:  		ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",  			   iface_param->param); @@ -1311,6 +2496,168 @@ static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,  	}  } +static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha, +				    struct iscsi_iface_param_info *iface_param, +				    struct addr_ctrl_blk *init_fw_cb) +{ +	switch (iface_param->param) { +	case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->def_timeout = +				cpu_to_le16(*(uint16_t *)iface_param->value); +		break; +	case ISCSI_IFACE_PARAM_HDRDGST_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_HEADER_DIGEST_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_HEADER_DIGEST_EN); +		break; +	case ISCSI_IFACE_PARAM_DATADGST_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_DATA_DIGEST_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_DATA_DIGEST_EN); +		break; +	case ISCSI_IFACE_PARAM_IMM_DATA_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_IMMEDIATE_DATA_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_IMMEDIATE_DATA_EN); +		break; +	case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_INITIAL_R2T_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_INITIAL_R2T_EN); +		break; +	case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_DATA_SEQ_INORDER_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_DATA_SEQ_INORDER_EN); +		break; +	case ISCSI_IFACE_PARAM_PDU_INORDER_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_DATA_PDU_INORDER_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_DATA_PDU_INORDER_EN); +		break; +	case ISCSI_IFACE_PARAM_ERL: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->iscsi_opts &= cpu_to_le16(~ISCSIOPTS_ERL); +		init_fw_cb->iscsi_opts |= cpu_to_le16(iface_param->value[0] & +						      ISCSIOPTS_ERL); +		break; +	case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->iscsi_max_pdu_size = +				cpu_to_le32(*(uint32_t *)iface_param->value) / +				BYTE_UNITS; +		break; +	case ISCSI_IFACE_PARAM_FIRST_BURST: +		if (iface_param->iface_num & 0x1) +			break; 
+		init_fw_cb->iscsi_fburst_len = +				cpu_to_le32(*(uint32_t *)iface_param->value) / +				BYTE_UNITS; +		break; +	case ISCSI_IFACE_PARAM_MAX_R2T: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->iscsi_max_outstnd_r2t = +				cpu_to_le16(*(uint16_t *)iface_param->value); +		break; +	case ISCSI_IFACE_PARAM_MAX_BURST: +		if (iface_param->iface_num & 0x1) +			break; +		init_fw_cb->iscsi_max_burst_len = +				cpu_to_le32(*(uint32_t *)iface_param->value) / +				BYTE_UNITS; +		break; +	case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_CHAP_AUTH_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_CHAP_AUTH_EN); +		break; +	case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_BIDI_CHAP_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_BIDI_CHAP_EN); +		break; +	case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_DISCOVERY_AUTH_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_DISCOVERY_AUTH_EN); +		break; +	case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_DISCOVERY_LOGOUT_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_DISCOVERY_LOGOUT_EN); +		break; +	case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: +		if (iface_param->iface_num & 0x1) +			break; +		if (iface_param->value[0] == ISCSI_NET_PARAM_ENABLE) +			init_fw_cb->iscsi_opts |= +				cpu_to_le16(ISCSIOPTS_STRICT_LOGIN_COMP_EN); +		else +			init_fw_cb->iscsi_opts &= +				cpu_to_le16(~ISCSIOPTS_STRICT_LOGIN_COMP_EN); +		break; +	default: +		ql4_printk(KERN_ERR, ha, "Unknown iscsi param = %d\n", +			   iface_param->param); +		break; +	} +} +  static void  qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)  { @@ -1368,40 +2715,47 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)  	nla_for_each_attr(attr, data, len, rem) {  		iface_param = nla_data(attr); -		if (iface_param->param_type != ISCSI_NET_PARAM) -			continue; - -		switch (iface_param->iface_type) { -		case ISCSI_IFACE_TYPE_IPV4: -			switch (iface_param->iface_num) { -			case 0: -				qla4xxx_set_ipv4(ha, iface_param, init_fw_cb); -				break; -			default: +		if (iface_param->param_type == ISCSI_NET_PARAM) { +			switch (iface_param->iface_type) { +			case ISCSI_IFACE_TYPE_IPV4: +				switch (iface_param->iface_num) { +				case 0: +					qla4xxx_set_ipv4(ha, iface_param, +							 init_fw_cb); +					break; +				default:  				/* Cannot have more than one IPv4 interface */ -				ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface " -					   "number = %d\n", -					   iface_param->iface_num); +					ql4_printk(KERN_ERR, ha, +						   "Invalid IPv4 iface number = %d\n", +						   iface_param->iface_num); +					break; +				}  				break; -			} -			break; -		case ISCSI_IFACE_TYPE_IPV6: -			switch (iface_param->iface_num) { -			case 0: -			case 1: -				qla4xxx_set_ipv6(ha, iface_param, init_fw_cb); +			case ISCSI_IFACE_TYPE_IPV6: +				switch (iface_param->iface_num) { +				case 0: +				case 1: +			
		qla4xxx_set_ipv6(ha, iface_param, +							 init_fw_cb); +					break; +				default: +				/* Cannot have more than two IPv6 interface */ +					ql4_printk(KERN_ERR, ha, +						   "Invalid IPv6 iface number = %d\n", +						   iface_param->iface_num); +					break; +				}  				break;  			default: -				/* Cannot have more than two IPv6 interface */ -				ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface " -					   "number = %d\n", -					   iface_param->iface_num); +				ql4_printk(KERN_ERR, ha, +					   "Invalid iface type\n");  				break;  			} -			break; -		default: -			ql4_printk(KERN_ERR, ha, "Invalid iface type\n"); -			break; +		} else if (iface_param->param_type == ISCSI_IFACE_PARAM) { +				qla4xxx_set_iscsi_param(ha, iface_param, +							init_fw_cb); +		} else { +			continue;  		}  	} @@ -1455,9 +2809,12 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,  	struct iscsi_session *sess = cls_sess->dd_data;  	struct ddb_entry *ddb_entry = sess->dd_data;  	struct scsi_qla_host *ha = ddb_entry->ha; +	struct iscsi_cls_conn *cls_conn = ddb_entry->conn; +	struct ql4_chap_table chap_tbl;  	int rval, len;  	uint16_t idx; +	memset(&chap_tbl, 0, sizeof(chap_tbl));  	switch (param) {  	case ISCSI_PARAM_CHAP_IN_IDX:  		rval = qla4xxx_get_chap_index(ha, sess->username_in, @@ -1469,14 +2826,46 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,  			len = sprintf(buf, "%hu\n", idx);  		break;  	case ISCSI_PARAM_CHAP_OUT_IDX: -		rval = qla4xxx_get_chap_index(ha, sess->username, -					      sess->password, LOCAL_CHAP, -					      &idx); +		if (ddb_entry->ddb_type == FLASH_DDB) { +			if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { +				idx = ddb_entry->chap_tbl_idx; +				rval = QLA_SUCCESS; +			} else { +				rval = QLA_ERROR; +			} +		} else { +			rval = qla4xxx_get_chap_index(ha, sess->username, +						      sess->password, +						      LOCAL_CHAP, &idx); +		}  		if (rval)  			len = sprintf(buf, "\n");  		else  			len = sprintf(buf, "%hu\n", idx);  		break; +	case ISCSI_PARAM_USERNAME: +	case ISCSI_PARAM_PASSWORD: +		/* First, populate session username and password for FLASH DDB, +		 * if not already done. This happens when session login fails +		 * for a FLASH DDB. 
+		 */ +		if (ddb_entry->ddb_type == FLASH_DDB && +		    ddb_entry->chap_tbl_idx != INVALID_ENTRY && +		    !sess->username && !sess->password) { +			idx = ddb_entry->chap_tbl_idx; +			rval = qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, +							    chap_tbl.secret, +							    idx); +			if (!rval) { +				iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, +						(char *)chap_tbl.name, +						strlen((char *)chap_tbl.name)); +				iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, +						(char *)chap_tbl.secret, +						chap_tbl.secret_len); +			} +		} +		/* allow fall-through */  	default:  		return iscsi_session_get_param(cls_sess, param, buf);  	} @@ -1490,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,  	struct iscsi_conn *conn;  	struct qla_conn *qla_conn;  	struct sockaddr *dst_addr; -	int len = 0;  	conn = cls_conn->dd_data;  	qla_conn = conn->dd_data; @@ -1504,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,  	default:  		return iscsi_conn_get_param(cls_conn, param, buf);  	} - -	return len; -  }  int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index) @@ -1667,7 +3052,6 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,  	struct sockaddr *dst_addr;  	int ret; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));  	if (!ep) {  		printk(KERN_ERR "qla4xxx: missing ep.\n");  		return NULL; @@ -1676,6 +3060,8 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,  	qla_ep = ep->dd_data;  	dst_addr = (struct sockaddr *)&qla_ep->dst_addr;  	ha = to_qla_host(qla_ep->host); +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, +			  ha->host_no));  	ret = qla4xxx_get_ddb_index(ha, &ddb_index);  	if (ret == QLA_ERROR) @@ -1696,6 +3082,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,  	ddb_entry->sess = cls_sess;  	ddb_entry->unblock_sess = qla4xxx_unblock_ddb;  	ddb_entry->ddb_change = qla4xxx_ddb_change; +	clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags);  	cls_sess->recovery_tmo = ql4xsess_recovery_tmo;  	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;  	ha->tot_ddbs++; @@ -1714,10 +3101,11 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)  	uint32_t ddb_state;  	int ret; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));  	sess = cls_sess->dd_data;  	ddb_entry = sess->dd_data;  	ha = ddb_entry->ha; +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__, +			  ha->host_no));  	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),  					  &fw_ddb_entry_dma, GFP_KERNEL); @@ -1745,7 +3133,8 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)  destroy_session:  	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); - +	if (test_and_clear_bit(DDB_CONN_CLOSE_FAILURE, &ddb_entry->flags)) +		clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);  	spin_lock_irqsave(&ha->hardware_lock, flags);  	qla4xxx_free_ddb(ha, ddb_entry);  	spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1763,17 +3152,23 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)  	struct iscsi_cls_conn *cls_conn;  	struct iscsi_session *sess;  	struct ddb_entry *ddb_entry; +	struct scsi_qla_host *ha; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));  	cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),  				    conn_idx); -	if (!cls_conn) +	if (!cls_conn) { +		pr_info("%s: Can not create connection for conn_idx = %u\n", +			__func__, conn_idx);  		return NULL; +	}  	sess = cls_sess->dd_data;  	ddb_entry = 
sess->dd_data;  	ddb_entry->conn = cls_conn; +	ha = ddb_entry->ha; +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: conn_idx = %u\n", __func__, +			  conn_idx));  	return cls_conn;  } @@ -1784,8 +3179,16 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,  	struct iscsi_conn *conn;  	struct qla_conn *qla_conn;  	struct iscsi_endpoint *ep; +	struct ddb_entry *ddb_entry; +	struct scsi_qla_host *ha; +	struct iscsi_session *sess; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); +	sess = cls_session->dd_data; +	ddb_entry = sess->dd_data; +	ha = ddb_entry->ha; + +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, +			  cls_session->sid, cls_conn->cid));  	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))  		return -EINVAL; @@ -1808,10 +3211,11 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)  	int ret = 0;  	int status = QLA_SUCCESS; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));  	sess = cls_sess->dd_data;  	ddb_entry = sess->dd_data;  	ha = ddb_entry->ha; +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: sid = %d, cid = %d\n", __func__, +			  cls_sess->sid, cls_conn->cid));  	/* Check if we have  matching FW DDB, if yes then do not  	 * login to this target. This could cause target to logout previous @@ -1885,10 +3289,11 @@ static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)  	struct ddb_entry *ddb_entry;  	int options; -	DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));  	sess = cls_sess->dd_data;  	ddb_entry = sess->dd_data;  	ha = ddb_entry->ha; +	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: cid = %d\n", __func__, +			  cls_conn->cid));  	options = LOGOUT_OPTION_CLOSE_SESSION;  	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) @@ -2160,14 +3565,13 @@ static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,  	if (test_bit(OPT_IPV6_DEVICE, &options)) {  		conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; -		conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL); +		conn->link_local_ipv6_addr = kmemdup( +					fw_ddb_entry->link_local_ipv6_addr, +					IPv6_ADDR_LEN, GFP_KERNEL);  		if (!conn->link_local_ipv6_addr) {  			rc = -ENOMEM;  			goto exit_copy;  		} - -		memcpy(conn->link_local_ipv6_addr, -		       fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);  	} else {  		conn->ipv4_tos = fw_ddb_entry->ipv4_tos;  	} @@ -2306,6 +3710,7 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,  	unsigned long options = 0;  	uint16_t ddb_link;  	uint16_t disc_parent; +	char ip_addr[DDB_IPADDR_LEN];  	options = le16_to_cpu(fw_ddb_entry->options);  	conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options); @@ -2373,11 +3778,6 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,  	COPY_ISID(sess->isid, fw_ddb_entry->isid);  	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); -	if (ddb_link < MAX_DDB_ENTRIES) -		sess->discovery_parent_idx = ddb_link; -	else -		sess->discovery_parent_idx = DDB_NO_LINK; -  	if (ddb_link == DDB_ISNS)  		disc_parent = ISCSI_DISC_PARENT_ISNS;  	else if (ddb_link == DDB_NO_LINK) @@ -2392,6 +3792,14 @@ static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,  	iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,  			(char *)fw_ddb_entry->iscsi_alias, 0); + +	options = le16_to_cpu(fw_ddb_entry->options); +	if (options & DDB_OPT_IPV6_DEVICE) { +		memset(ip_addr, 0, sizeof(ip_addr)); +		sprintf(ip_addr, "%pI6", fw_ddb_entry->link_local_ipv6_addr); +		iscsi_set_param(conn->cls_conn, 
ISCSI_PARAM_LOCAL_IPADDR, +				(char *)ip_addr, 0); +	}  }  static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha, @@ -2402,6 +3810,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,  	int buflen = 0;  	struct iscsi_session *sess;  	struct ddb_entry *ddb_entry; +	struct ql4_chap_table chap_tbl;  	struct iscsi_conn *conn;  	char ip_addr[DDB_IPADDR_LEN];  	uint16_t options = 0; @@ -2409,6 +3818,7 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,  	sess = cls_sess->dd_data;  	ddb_entry = sess->dd_data;  	conn = cls_conn->dd_data; +	memset(&chap_tbl, 0, sizeof(chap_tbl));  	ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx); @@ -2435,6 +3845,19 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,  			(char *)fw_ddb_entry->iscsi_name, buflen);  	iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,  			(char *)ha->name_string, buflen); + +	if (ddb_entry->chap_tbl_idx != INVALID_ENTRY) { +		if (!qla4xxx_get_uni_chap_at_index(ha, chap_tbl.name, +						   chap_tbl.secret, +						   ddb_entry->chap_tbl_idx)) { +			iscsi_set_param(cls_conn, ISCSI_PARAM_USERNAME, +					(char *)chap_tbl.name, +					strlen((char *)chap_tbl.name)); +			iscsi_set_param(cls_conn, ISCSI_PARAM_PASSWORD, +					(char *)chap_tbl.secret, +					chap_tbl.secret_len); +		} +	}  }  void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, @@ -2975,6 +4398,11 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)  	uint32_t dev_state;  	uint32_t idc_ctrl; +	if (is_qla8032(ha) && +	    (qla4_83xx_is_detached(ha) == QLA_SUCCESS)) +		WARN_ONCE(1, "%s: iSCSI function %d marked invisible\n", +			  __func__, ha->func_num); +  	/* don't poll if reset is going on */  	if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||  	    test_bit(DPC_RESET_HA, &ha->dpc_flags) || @@ -3132,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)  	     test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||  	     test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||  	     test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || +	     test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||  	     test_bit(DPC_AEN, &ha->dpc_flags)) {  		DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"  			      " - dpc flags = 0x%lx\n", @@ -3157,11 +4586,19 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)  	uint32_t index = 0;  	unsigned long flags;  	struct scsi_cmnd *cmd; +	unsigned long wtime; +	uint32_t wtmo; -	unsigned long wtime = jiffies + (WAIT_CMD_TOV * HZ); +	if (is_qla40XX(ha)) +		wtmo = WAIT_CMD_TOV; +	else +		wtmo = ha->nx_reset_timeout / 2; -	DEBUG2(ql4_printk(KERN_INFO, ha, "Wait up to %d seconds for cmds to " -	    "complete\n", WAIT_CMD_TOV)); +	wtime = jiffies + (wtmo * HZ); + +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "Wait up to %u seconds for cmds to complete\n", +			  wtmo));  	while (!time_after_eq(jiffies, wtime)) {  		spin_lock_irqsave(&ha->hardware_lock, flags); @@ -3421,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)  		    ha->host_no, __func__));  		status = ha->isp_ops->reset_firmware(ha);  		if (status == QLA_SUCCESS) { -			if (!test_bit(AF_FW_RECOVERY, &ha->flags)) -				qla4xxx_cmd_wait(ha); -  			ha->isp_ops->disable_intrs(ha);  			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);  			qla4xxx_abort_active_cmds(ha, DID_RESET << 16); @@ -3464,11 +4898,11 @@ chip_reset:  			qla4xxx_cmd_wait(ha);  		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); -		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);  		DEBUG2(ql4_printk(KERN_INFO, ha,  		    
"scsi%ld: %s - Performing chip reset..\n",  		    ha->host_no, __func__));  		status = ha->isp_ops->reset_chip(ha); +		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);  	}  	/* Flush any pending ddb changed AENs */ @@ -3484,8 +4918,21 @@ recover_ha_init_adapter:  			ssleep(6);  		/* NOTE: AF_ONLINE flag set upon successful completion of -		 *       qla4xxx_initialize_adapter */ +		 * qla4xxx_initialize_adapter */  		status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER); +		if (is_qla80XX(ha) && (status == QLA_ERROR)) { +			status = qla4_8xxx_check_init_adapter_retry(ha); +			if (status == QLA_ERROR) { +				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Don't retry recover adapter\n", +					   ha->host_no, __func__); +				qla4xxx_dead_adapter_cleanup(ha); +				clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags); +				clear_bit(DPC_RESET_HA, &ha->dpc_flags); +				clear_bit(DPC_RESET_HA_FW_CONTEXT, +					  &ha->dpc_flags); +				goto exit_recover; +			} +		}  	}  	/* Retry failed adapter initialization, if necessary @@ -3831,9 +5278,9 @@ static void qla4xxx_do_dpc(struct work_struct *work)  		container_of(work, struct scsi_qla_host, dpc_work);  	int status = QLA_ERROR; -	DEBUG2(printk("scsi%ld: %s: DPC handler waking up." -	    "flags = 0x%08lx, dpc_flags = 0x%08lx\n", -	    ha->host_no, __func__, ha->flags, ha->dpc_flags)) +	DEBUG2(ql4_printk(KERN_INFO, ha, +			  "scsi%ld: %s: DPC handler waking up. flags = 0x%08lx, dpc_flags = 0x%08lx\n", +			  ha->host_no, __func__, ha->flags, ha->dpc_flags));  	/* Initialization not yet finished. Don't do anything yet. */  	if (!test_bit(AF_INIT_DONE, &ha->flags)) @@ -3978,6 +5425,11 @@ dpc_post_reset_ha:  				qla4xxx_relogin_all_devices(ha);  		}  	} +	if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) { +		if (qla4xxx_sysfs_ddb_export(ha)) +			ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n", +				   __func__); +	}  }  /** @@ -4788,64 +6240,6 @@ kset_free:  } -/** - * qla4xxx_create chap_list - Create CHAP list from FLASH - * @ha: pointer to adapter structure - * - * Read flash and make a list of CHAP entries, during login when a CHAP entry - * is received, it will be checked in this list. If entry exist then the CHAP - * entry index is set in the DDB. If CHAP entry does not exist in this list - * then a new entry is added in FLASH in CHAP table and the index obtained is - * used in the DDB. - **/ -static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) -{ -	int rval = 0; -	uint8_t *chap_flash_data = NULL; -	uint32_t offset; -	dma_addr_t chap_dma; -	uint32_t chap_size = 0; - -	if (is_qla40XX(ha)) -		chap_size = MAX_CHAP_ENTRIES_40XX  * -					sizeof(struct ql4_chap_table); -	else	/* Single region contains CHAP info for both -		 * ports which is divided into half for each port. 
-		 */ -		chap_size = ha->hw.flt_chap_size / 2; - -	chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, -					  &chap_dma, GFP_KERNEL); -	if (!chap_flash_data) { -		ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); -		return; -	} -	if (is_qla40XX(ha)) -		offset = FLASH_CHAP_OFFSET; -	else { -		offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); -		if (ha->port_num == 1) -			offset += chap_size; -	} - -	rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); -	if (rval != QLA_SUCCESS) -		goto exit_chap_list; - -	if (ha->chap_list == NULL) -		ha->chap_list = vmalloc(chap_size); -	if (ha->chap_list == NULL) { -		ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); -		goto exit_chap_list; -	} - -	memcpy(ha->chap_list, chap_flash_data, chap_size); - -exit_chap_list: -	dma_free_coherent(&ha->pdev->dev, chap_size, -			chap_flash_data, chap_dma); -} -  static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,  				  struct ql4_tuple_ddb *tddb)  { @@ -4937,7 +6331,8 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,  }  static int qla4xxx_is_session_exists(struct scsi_qla_host *ha, -				     struct dev_db_entry *fw_ddb_entry) +				     struct dev_db_entry *fw_ddb_entry, +				     uint32_t *index)  {  	struct ddb_entry *ddb_entry;  	struct ql4_tuple_ddb *fw_tddb = NULL; @@ -4971,6 +6366,8 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,  		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);  		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {  			ret = QLA_SUCCESS; /* found */ +			if (index != NULL) +				*index = idx;  			goto exit_check;  		}  	} @@ -5206,6 +6603,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,  	ddb_entry->ha = ha;  	ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;  	ddb_entry->ddb_change = qla4xxx_flash_ddb_change; +	ddb_entry->chap_tbl_idx = INVALID_ENTRY;  	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);  	atomic_set(&ddb_entry->relogin_timer, 0); @@ -5267,6 +6665,87 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)  	} while (time_after(wtime, jiffies));  } +static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry, +				  struct dev_db_entry *flash_ddb_entry) +{ +	uint16_t options = 0; +	size_t ip_len = IP_ADDR_LEN; + +	options = le16_to_cpu(fw_ddb_entry->options); +	if (options & DDB_OPT_IPV6_DEVICE) +		ip_len = IPv6_ADDR_LEN; + +	if (memcmp(fw_ddb_entry->ip_addr, flash_ddb_entry->ip_addr, ip_len)) +		return QLA_ERROR; + +	if (memcmp(&fw_ddb_entry->isid[0], &flash_ddb_entry->isid[0], +		   sizeof(fw_ddb_entry->isid))) +		return QLA_ERROR; + +	if (memcmp(&fw_ddb_entry->port, &flash_ddb_entry->port, +		   sizeof(fw_ddb_entry->port))) +		return QLA_ERROR; + +	return QLA_SUCCESS; +} + +static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha, +				     struct dev_db_entry *fw_ddb_entry, +				     uint32_t fw_idx, uint32_t *flash_index) +{ +	struct dev_db_entry *flash_ddb_entry; +	dma_addr_t flash_ddb_entry_dma; +	uint32_t idx = 0; +	int max_ddbs; +	int ret = QLA_ERROR, status; + +	max_ddbs =  is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : +				     MAX_DEV_DB_ENTRIES; + +	flash_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, +					 &flash_ddb_entry_dma); +	if (flash_ddb_entry == NULL || fw_ddb_entry == NULL) { +		ql4_printk(KERN_ERR, ha, "Out of memory\n"); +		goto exit_find_st_idx; +	} + +	status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, +					  flash_ddb_entry_dma, fw_idx); +	if (status == QLA_SUCCESS) { +		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); +		if (status == QLA_SUCCESS) { +			*flash_index = fw_idx; +			ret = QLA_SUCCESS; +			goto exit_find_st_idx; +		} +	} + +	for (idx = 0; idx < max_ddbs; idx++) { +		status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, +						  flash_ddb_entry_dma, idx); +		if (status == QLA_ERROR) +			continue; + +		status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); +		if (status == QLA_SUCCESS) { +			*flash_index = idx; +			ret = QLA_SUCCESS; +			goto exit_find_st_idx; +		} +	} + +	if (idx == max_ddbs) +		ql4_printk(KERN_ERR, ha, "Failed to find ST [%d] in flash\n", +			   fw_idx); + +exit_find_st_idx: +	if (flash_ddb_entry) +		dma_pool_free(ha->fw_ddb_dma_pool, flash_ddb_entry, +			      flash_ddb_entry_dma); + +	return ret; +} +  static void qla4xxx_build_st_list(struct scsi_qla_host *ha,  				  struct list_head *list_st)  { @@ -5278,6 +6757,7 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,  	int ret;  	uint32_t idx = 0, next_idx = 0;  	uint32_t state = 0, conn_err = 0; +	uint32_t flash_index = -1;  	uint16_t conn_id = 0;  	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, @@ -5310,6 +6790,19 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,  		if (!st_ddb_idx)  			break; +		ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, +						&flash_index); +		if (ret == QLA_ERROR) { +			ql4_printk(KERN_ERR, ha, +				   "No flash entry for ST at idx [%d]\n", idx); +			st_ddb_idx->flash_ddb_idx = idx; +		} else { +			ql4_printk(KERN_INFO, ha, +				   "ST at idx [%d] is stored at flash [%d]\n", +				   idx, flash_index); +			st_ddb_idx->flash_ddb_idx = flash_index; +		} +  		st_ddb_idx->fw_ddb_idx = idx;  		list_add_tail(&st_ddb_idx->list, list_st); @@ -5354,6 +6847,28 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,  	}  } +static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha, +					 struct ddb_entry *ddb_entry, +					 struct dev_db_entry *fw_ddb_entry) +{ +	struct iscsi_cls_session *cls_sess; +	struct iscsi_session *sess; +	uint32_t max_ddbs = 0; +	uint16_t ddb_link = -1; + +	max_ddbs =  is_qla40XX(ha) ? 
MAX_DEV_DB_ENTRIES_40XX : +				     MAX_DEV_DB_ENTRIES; + +	cls_sess = ddb_entry->sess; +	sess = cls_sess->dd_data; + +	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); +	if (ddb_link < max_ddbs) +		sess->discovery_parent_idx = ddb_link; +	else +		sess->discovery_parent_idx = DDB_NO_LINK; +} +  static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,  				   struct dev_db_entry *fw_ddb_entry,  				   int is_reset, uint16_t idx) @@ -5418,6 +6933,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,  	/* Update sess/conn params */  	qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); +	qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry);  	if (is_reset == RESET_ADAPTER) {  		iscsi_block_session(cls_sess); @@ -5434,17 +6950,43 @@ exit_setup:  	return ret;  } +static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha, +				       struct list_head *list_ddb, +				       struct dev_db_entry *fw_ddb_entry) +{ +	struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp; +	uint16_t ddb_link; + +	ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); + +	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) { +		if (ddb_idx->fw_ddb_idx == ddb_link) { +			DEBUG2(ql4_printk(KERN_INFO, ha, +					  "Updating NT parent idx from [%d] to [%d]\n", +					  ddb_link, ddb_idx->flash_ddb_idx)); +			fw_ddb_entry->ddb_link = +					    cpu_to_le16(ddb_idx->flash_ddb_idx); +			return; +		} +	} +} +  static void qla4xxx_build_nt_list(struct scsi_qla_host *ha, -				  struct list_head *list_nt, int is_reset) +				  struct list_head *list_nt, +				  struct list_head *list_st, +				  int is_reset)  {  	struct dev_db_entry *fw_ddb_entry; +	struct ddb_entry *ddb_entry = NULL;  	dma_addr_t fw_ddb_dma;  	int max_ddbs;  	int fw_idx_size;  	int ret;  	uint32_t idx = 0, next_idx = 0;  	uint32_t state = 0, conn_err = 0; +	uint32_t ddb_idx = -1;  	uint16_t conn_id = 0; +	uint16_t ddb_link = -1;  	struct qla_ddb_index  *nt_ddb_idx;  	fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, @@ -5471,12 +7013,18 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,  		if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)  			goto continue_next_nt; +		ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link); +		if (ddb_link < max_ddbs) +			qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); +  		if (!(state == DDB_DS_NO_CONNECTION_ACTIVE || -		    state == DDB_DS_SESSION_FAILED)) +		    state == DDB_DS_SESSION_FAILED) && +		    (is_reset == INIT_ADAPTER))  			goto continue_next_nt;  		DEBUG2(ql4_printk(KERN_INFO, ha,  				  "Adding  DDB to session = 0x%x\n", idx)); +  		if (is_reset == INIT_ADAPTER) {  			nt_ddb_idx = vmalloc(fw_idx_size);  			if (!nt_ddb_idx) @@ -5506,9 +7054,17 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,  			list_add_tail(&nt_ddb_idx->list, list_nt);  		} else if (is_reset == RESET_ADAPTER) { -			if (qla4xxx_is_session_exists(ha, fw_ddb_entry) == -								QLA_SUCCESS) +			ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, +							&ddb_idx); +			if (ret == QLA_SUCCESS) { +				ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, +								       ddb_idx); +				if (ddb_entry != NULL) +					qla4xxx_update_sess_disc_idx(ha, +								     ddb_entry, +								  fw_ddb_entry);  				goto continue_next_nt; +			}  		}  		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx); @@ -5526,7 +7082,8 @@ exit_nt_list:  }  static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha, -				      struct list_head *list_nt) +				      struct list_head *list_nt, +				      uint16_t 
target_id)  {  	struct dev_db_entry *fw_ddb_entry;  	dma_addr_t fw_ddb_dma; @@ -5571,13 +7128,16 @@ static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha,  		nt_ddb_idx->fw_ddb_idx = idx; -		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); +		ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);  		if (ret == QLA_SUCCESS) {  			/* free nt_ddb_idx and do not add to list_nt */  			vfree(nt_ddb_idx);  			goto continue_next_new_nt;  		} +		if (target_id < max_ddbs) +			fw_ddb_entry->ddb_link = cpu_to_le16(target_id); +  		list_add_tail(&nt_ddb_idx->list, list_nt);  		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER, @@ -5894,7 +7454,8 @@ exit_ddb_conn_open:  }  static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha, -				struct dev_db_entry *fw_ddb_entry) +				struct dev_db_entry *fw_ddb_entry, +				uint16_t target_id)  {  	struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;  	struct list_head list_nt; @@ -5919,7 +7480,7 @@ static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha,  	if (ret == QLA_ERROR)  		goto exit_login_st; -	qla4xxx_build_new_nt_list(ha, &list_nt); +	qla4xxx_build_new_nt_list(ha, &list_nt, target_id);  	list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, &list_nt, list) {  		list_del_init(&ddb_idx->list); @@ -5946,7 +7507,7 @@ static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha,  {  	int ret = QLA_ERROR; -	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry); +	ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, NULL);  	if (ret != QLA_SUCCESS)  		ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, RESET_ADAPTER,  					      idx); @@ -6001,7 +7562,8 @@ static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess,  	fw_ddb_entry->cookie = DDB_VALID_COOKIE;  	if (strlen((char *)fw_ddb_entry->iscsi_name) == 0) -		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry); +		ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, +					   fnode_sess->target_id);  	else  		ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry,  					   fnode_sess->target_id); @@ -6522,10 +8084,13 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,  	struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess);  	struct scsi_qla_host *ha = to_qla_host(shost);  	struct iscsi_flashnode_param_info *fnode_param; +	struct ql4_chap_table chap_tbl;  	struct nlattr *attr; +	uint16_t chap_out_idx = INVALID_ENTRY;  	int rc = QLA_ERROR;  	uint32_t rem = len; +	memset((void *)&chap_tbl, 0, sizeof(chap_tbl));  	nla_for_each_attr(attr, data, len, rem) {  		fnode_param = nla_data(attr); @@ -6567,6 +8132,10 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,  			break;  		case ISCSI_FLASHNODE_CHAP_AUTH_EN:  			fnode_sess->chap_auth_en = fnode_param->value[0]; +			/* Invalidate chap index if chap auth is disabled */ +			if (!fnode_sess->chap_auth_en) +				fnode_sess->chap_out_idx = INVALID_ENTRY; +  			break;  		case ISCSI_FLASHNODE_SNACK_REQ_EN:  			fnode_conn->snack_req_en = fnode_param->value[0]; @@ -6705,6 +8274,17 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,  			fnode_conn->exp_statsn =  						*(uint32_t *)fnode_param->value;  			break; +		case ISCSI_FLASHNODE_CHAP_OUT_IDX: +			chap_out_idx = *(uint16_t *)fnode_param->value; +			if (!qla4xxx_get_uni_chap_at_index(ha, +							   chap_tbl.name, +							   chap_tbl.secret, +							   chap_out_idx)) { +				fnode_sess->chap_out_idx = chap_out_idx; +				/* Enable chap auth if chap index is valid */ +				fnode_sess->chap_auth_en = QL4_PARAM_ENABLE; +			} +			break;  		default:  			
ql4_printk(KERN_ERR, ha,  				   "%s: No such sysfs attribute\n", __func__); @@ -6827,7 +8407,7 @@ exit_ddb_del:   *   * Export the firmware DDB for all send targets and normal targets to sysfs.   **/ -static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha) +int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)  {  	struct dev_db_entry *fw_ddb_entry = NULL;  	dma_addr_t fw_ddb_entry_dma; @@ -6926,11 +8506,10 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)  		schedule_timeout_uninterruptible(HZ / 10);  	} while (time_after(wtime, jiffies)); -	/* Free up the sendtargets list */ -	qla4xxx_free_ddb_list(&list_st); -	qla4xxx_build_nt_list(ha, &list_nt, is_reset); +	qla4xxx_build_nt_list(ha, &list_nt, &list_st, is_reset); +	qla4xxx_free_ddb_list(&list_st);  	qla4xxx_free_ddb_list(&list_nt);  	qla4xxx_free_ddb_index(ha); @@ -7094,6 +8673,9 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,  	mutex_init(&ha->chap_sem);  	init_completion(&ha->mbx_intr_comp);  	init_completion(&ha->disable_acb_comp); +	init_completion(&ha->idc_comp); +	init_completion(&ha->link_up_comp); +	init_completion(&ha->disable_acb_comp);  	spin_lock_init(&ha->hardware_lock);  	spin_lock_init(&ha->work_lock); @@ -7154,11 +8736,8 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,  	status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);  	/* Dont retry adapter initialization if IRQ allocation failed */ -	if (is_qla80XX(ha) && !test_bit(AF_IRQ_ATTACHED, &ha->flags)) { -		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization\n", -			   __func__); +	if (is_qla80XX(ha) && (status == QLA_ERROR))  		goto skip_retry_init; -	}  	while ((!test_bit(AF_ONLINE, &ha->flags)) &&  	    init_retry_count++ < MAX_INIT_RETRIES) { @@ -7182,6 +8761,10 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,  			continue;  		status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER); +		if (is_qla80XX(ha) && (status == QLA_ERROR)) { +			if (qla4_8xxx_check_init_adapter_retry(ha) == QLA_ERROR) +				goto skip_retry_init; +		}  	}  skip_retry_init: @@ -7262,11 +8845,8 @@ skip_retry_init:  		ql4_printk(KERN_ERR, ha,  			   "%s: No iSCSI boot target configured\n", __func__); -	if (qla4xxx_sysfs_ddb_export(ha)) -		ql4_printk(KERN_ERR, ha, -			   "%s: Error exporting ddb to sysfs\n", __func__); - -		/* Perform the build ddb list and login to each */ +	set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags); +	/* Perform the build ddb list and login to each */  	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);  	iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);  	qla4xxx_wait_login_resp_boot_tgt(ha); @@ -7330,10 +8910,56 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)  	}  } +static void qla4xxx_destroy_ddb(struct scsi_qla_host *ha, +		struct ddb_entry *ddb_entry) +{ +	struct dev_db_entry *fw_ddb_entry = NULL; +	dma_addr_t fw_ddb_entry_dma; +	unsigned long wtime; +	uint32_t ddb_state; +	int options; +	int status; + +	options = LOGOUT_OPTION_CLOSE_SESSION; +	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) { +		ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); +		goto clear_ddb; +	} + +	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), +					  &fw_ddb_entry_dma, GFP_KERNEL); +	if (!fw_ddb_entry) { +		ql4_printk(KERN_ERR, ha, +			   "%s: Unable to allocate dma buffer\n", __func__); +		goto clear_ddb; +	} + +	wtime = jiffies + (HZ * LOGOUT_TOV); +	do { +		status = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, +						 
fw_ddb_entry, fw_ddb_entry_dma, +						 NULL, NULL, &ddb_state, NULL, +						 NULL, NULL); +		if (status == QLA_ERROR) +			goto free_ddb; + +		if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) || +		    (ddb_state == DDB_DS_SESSION_FAILED)) +			goto free_ddb; + +		schedule_timeout_uninterruptible(HZ); +	} while ((time_after(wtime, jiffies))); + +free_ddb: +	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), +			  fw_ddb_entry, fw_ddb_entry_dma); +clear_ddb: +	qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); +} +  static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)  {  	struct ddb_entry *ddb_entry; -	int options;  	int idx;  	for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) { @@ -7342,13 +8968,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)  		if ((ddb_entry != NULL) &&  		    (ddb_entry->ddb_type == FLASH_DDB)) { -			options = LOGOUT_OPTION_CLOSE_SESSION; -			if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) -			    == QLA_ERROR) -				ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", -					   __func__); - -			qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); +			qla4xxx_destroy_ddb(ha, ddb_entry);  			/*  			 * we have decremented the reference count of the driver  			 * when we setup the session to have the driver unload @@ -7400,7 +9020,6 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)  	pci_disable_pcie_error_reporting(pdev);  	pci_disable_device(pdev); -	pci_set_drvdata(pdev, NULL);  }  /** @@ -7610,14 +9229,15 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)  	int ret = SUCCESS;  	int wait = 0; -	ql4_printk(KERN_INFO, ha, -	    "scsi%ld:%d:%d: Abort command issued cmd=%p\n", -	    ha->host_no, id, lun, cmd); +	ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Abort command issued cmd=%p, cdb=0x%x\n", +		   ha->host_no, id, lun, cmd, cmd->cmnd[0]);  	spin_lock_irqsave(&ha->hardware_lock, flags);  	srb = (struct srb *) CMD_SP(cmd);  	if (!srb) {  		spin_unlock_irqrestore(&ha->hardware_lock, flags); +		ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d: Specified command has already completed.\n", +			   ha->host_no, id, lun);  		return SUCCESS;  	}  	kref_get(&srb->srb_ref); @@ -8034,28 +9654,36 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)  	}  	fn = PCI_FUNC(ha->pdev->devfn); -	while (fn > 0) { -		fn--; -		ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at " -		    "func %x\n", ha->host_no, __func__, fn); -		/* Get the pci device given the domain, bus, -		 * slot/function number */ -		other_pdev = -		    pci_get_domain_bus_and_slot(pci_domain_nr(ha->pdev->bus), -		    ha->pdev->bus->number, PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), -		    fn)); - -		if (!other_pdev) -			continue; +	if (is_qla8022(ha)) { +		while (fn > 0) { +			fn--; +			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Finding PCI device at func %x\n", +				   ha->host_no, __func__, fn); +			/* Get the pci device given the domain, bus, +			 * slot/function number */ +			other_pdev = pci_get_domain_bus_and_slot( +					   pci_domain_nr(ha->pdev->bus), +					   ha->pdev->bus->number, +					   PCI_DEVFN(PCI_SLOT(ha->pdev->devfn), +					   fn)); + +			if (!other_pdev) +				continue; -		if (atomic_read(&other_pdev->enable_cnt)) { -			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI " -			    "func in enabled state%x\n", ha->host_no, -			    __func__, fn); +			if (atomic_read(&other_pdev->enable_cnt)) { +				ql4_printk(KERN_INFO, ha, "scsi%ld: %s: Found PCI func in enabled state%x\n", +					   ha->host_no, __func__, fn); +				pci_dev_put(other_pdev); +		
		break; +			}  			pci_dev_put(other_pdev); -			break;  		} -		pci_dev_put(other_pdev); +	} else { +		/* this case is meant for ISP83xx/ISP84xx only */ +		if (qla4_83xx_can_perform_reset(ha)) { +			/* reset fn as iSCSI is going to perform the reset */ +			fn = 0; +		}  	}  	/* The first function on the card, the reset owner will @@ -8089,6 +9717,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)  		if (rval != QLA_SUCCESS) {  			ql4_printk(KERN_INFO, ha, "scsi%ld: %s: HW State: "  			    "FAILED\n", ha->host_no, __func__); +			qla4xxx_free_irqs(ha);  			ha->isp_ops->idc_lock(ha);  			qla4_8xxx_clear_drv_active(ha);  			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, @@ -8116,6 +9745,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)  			rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);  			if (rval == QLA_SUCCESS)  				ha->isp_ops->enable_intrs(ha); +			else +				qla4xxx_free_irqs(ha);  			ha->isp_ops->idc_lock(ha);  			qla4_8xxx_set_drv_active(ha); diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index f4fef72c9bc..f11eaa77333 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@   * See LICENSE.qla4xxx for copyright and licensing details.   */ -#define QLA4XXX_DRIVER_VERSION	"5.04.00-k1" +#define QLA4XXX_DRIVER_VERSION	"5.04.00-k6" diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index eaa808e6ba9..88d46fe6bf9 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -78,11 +78,6 @@ static void scsi_done(struct scsi_cmnd *cmd);   * Definitions and constants.   */ -#define MIN_RESET_DELAY (2*HZ) - -/* Do not call reset on error if we just did a reset within 15 sec. */ -#define MIN_RESET_PERIOD (15*HZ) -  /*   * Note - the initial logging level can be set here to log events at boot time.   * After the system is up, you may enable logging via the /proc interface. @@ -96,6 +91,15 @@ EXPORT_SYMBOL(scsi_logging_level);  ASYNC_DOMAIN(scsi_sd_probe_domain);  EXPORT_SYMBOL(scsi_sd_probe_domain); +/* + * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of + * asynchronous system resume operations.  It is marked 'exclusive' to avoid + * being included in the async_synchronize_full() that is invoked by + * dpm_resume() + */ +ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain); +EXPORT_SYMBOL(scsi_sd_pm_domain); +  /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.   
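The new scsi_sd_pm_domain above is declared with ASYNC_DOMAIN_EXCLUSIVE() precisely so that resume work queued on it is not swept up by the global async_synchronize_full() issued from dpm_resume(). A minimal sketch of how such an exclusive domain is driven; the names example_pm_domain, example_resume_fn and example_resume_all are hypothetical and only illustrate the pattern:

#include <linux/async.h>
#include <linux/device.h>

static ASYNC_DOMAIN_EXCLUSIVE(example_pm_domain);	/* hypothetical domain */

static void example_resume_fn(void *data, async_cookie_t cookie)
{
	struct device *dev = data;

	/* per-device resume work; many instances run in parallel */
	dev_info(dev, "resumed\n");
}

static void example_resume_all(struct device **devs, int n)
{
	int i;

	for (i = 0; i < n; i++)
		async_schedule_domain(example_resume_fn, devs[i],
				      &example_pm_domain);
	/*
	 * Wait only for this domain.  An exclusive domain is never linked
	 * onto the global domain list, so an unrelated
	 * async_synchronize_full() -- e.g. the one dpm_resume() issues --
	 * does not block on the work queued here.
	 */
	async_synchronize_full_domain(&example_pm_domain);
}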
* You may not alter any existing entry (although adding new ones is   * encouraged once assigned by ANSI/INCITS T10 @@ -166,47 +170,20 @@ static struct scsi_host_cmd_pool scsi_cmd_dma_pool = {  static DEFINE_MUTEX(host_cmd_pool_mutex);  /** - * scsi_pool_alloc_command - internal function to get a fully allocated command - * @pool:	slab pool to allocate the command from - * @gfp_mask:	mask for the allocation - * - * Returns a fully allocated command (with the allied sense buffer) or - * NULL on failure - */ -static struct scsi_cmnd * -scsi_pool_alloc_command(struct scsi_host_cmd_pool *pool, gfp_t gfp_mask) -{ -	struct scsi_cmnd *cmd; - -	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask); -	if (!cmd) -		return NULL; - -	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, -					     gfp_mask | pool->gfp_mask); -	if (!cmd->sense_buffer) { -		kmem_cache_free(pool->cmd_slab, cmd); -		return NULL; -	} - -	return cmd; -} - -/** - * scsi_pool_free_command - internal function to release a command - * @pool:	slab pool to allocate the command from + * scsi_host_free_command - internal function to release a command + * @shost:	host to free the command for   * @cmd:	command to release   *   * the command must previously have been allocated by - * scsi_pool_alloc_command. + * scsi_host_alloc_command.   */  static void -scsi_pool_free_command(struct scsi_host_cmd_pool *pool, -			 struct scsi_cmnd *cmd) +scsi_host_free_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)  { +	struct scsi_host_cmd_pool *pool = shost->cmd_pool; +  	if (cmd->prot_sdb)  		kmem_cache_free(scsi_sdb_cache, cmd->prot_sdb); -  	kmem_cache_free(pool->sense_slab, cmd->sense_buffer);  	kmem_cache_free(pool->cmd_slab, cmd);  } @@ -222,22 +199,32 @@ scsi_pool_free_command(struct scsi_host_cmd_pool *pool,  static struct scsi_cmnd *  scsi_host_alloc_command(struct Scsi_Host *shost, gfp_t gfp_mask)  { +	struct scsi_host_cmd_pool *pool = shost->cmd_pool;  	struct scsi_cmnd *cmd; -	cmd = scsi_pool_alloc_command(shost->cmd_pool, gfp_mask); +	cmd = kmem_cache_zalloc(pool->cmd_slab, gfp_mask | pool->gfp_mask);  	if (!cmd) -		return NULL; +		goto fail; + +	cmd->sense_buffer = kmem_cache_alloc(pool->sense_slab, +					     gfp_mask | pool->gfp_mask); +	if (!cmd->sense_buffer) +		goto fail_free_cmd;  	if (scsi_host_get_prot(shost) >= SHOST_DIX_TYPE0_PROTECTION) {  		cmd->prot_sdb = kmem_cache_zalloc(scsi_sdb_cache, gfp_mask); - -		if (!cmd->prot_sdb) { -			scsi_pool_free_command(shost->cmd_pool, cmd); -			return NULL; -		} +		if (!cmd->prot_sdb) +			goto fail_free_sense;  	}  	return cmd; + +fail_free_sense: +	kmem_cache_free(pool->sense_slab, cmd->sense_buffer); +fail_free_cmd: +	kmem_cache_free(pool->cmd_slab, cmd); +fail: +	return NULL;  }  /** @@ -289,26 +276,19 @@ EXPORT_SYMBOL_GPL(__scsi_get_command);   */  struct scsi_cmnd *scsi_get_command(struct scsi_device *dev, gfp_t gfp_mask)  { -	struct scsi_cmnd *cmd; +	struct scsi_cmnd *cmd = __scsi_get_command(dev->host, gfp_mask); +	unsigned long flags; -	/* Bail if we can't get a reference to the device */ -	if (!get_device(&dev->sdev_gendev)) +	if (unlikely(cmd == NULL))  		return NULL; -	cmd = __scsi_get_command(dev->host, gfp_mask); - -	if (likely(cmd != NULL)) { -		unsigned long flags; - -		cmd->device = dev; -		INIT_LIST_HEAD(&cmd->list); -		spin_lock_irqsave(&dev->list_lock, flags); -		list_add_tail(&cmd->list, &dev->cmd_list); -		spin_unlock_irqrestore(&dev->list_lock, flags); -		cmd->jiffies_at_alloc = jiffies; -	} else -		put_device(&dev->sdev_gendev); - +	
cmd->device = dev; +	INIT_LIST_HEAD(&cmd->list); +	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); +	spin_lock_irqsave(&dev->list_lock, flags); +	list_add_tail(&cmd->list, &dev->cmd_list); +	spin_unlock_irqrestore(&dev->list_lock, flags); +	cmd->jiffies_at_alloc = jiffies;  	return cmd;  }  EXPORT_SYMBOL(scsi_get_command); @@ -317,25 +297,22 @@ EXPORT_SYMBOL(scsi_get_command);   * __scsi_put_command - Free a struct scsi_cmnd   * @shost: dev->host   * @cmd: Command to free - * @dev: parent scsi device   */ -void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd, -			struct device *dev) +void __scsi_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)  {  	unsigned long flags; -	/* changing locks here, don't need to restore the irq state */ -	spin_lock_irqsave(&shost->free_list_lock, flags);  	if (unlikely(list_empty(&shost->free_list))) { -		list_add(&cmd->list, &shost->free_list); -		cmd = NULL; +		spin_lock_irqsave(&shost->free_list_lock, flags); +		if (list_empty(&shost->free_list)) { +			list_add(&cmd->list, &shost->free_list); +			cmd = NULL; +		} +		spin_unlock_irqrestore(&shost->free_list_lock, flags);  	} -	spin_unlock_irqrestore(&shost->free_list_lock, flags);  	if (likely(cmd != NULL)) -		scsi_pool_free_command(shost->cmd_pool, cmd); - -	put_device(dev); +		scsi_host_free_command(shost, cmd);  }  EXPORT_SYMBOL(__scsi_put_command); @@ -349,7 +326,6 @@ EXPORT_SYMBOL(__scsi_put_command);   */  void scsi_put_command(struct scsi_cmnd *cmd)  { -	struct scsi_device *sdev = cmd->device;  	unsigned long flags;  	/* serious error if the command hasn't come from a device list */ @@ -358,50 +334,109 @@ void scsi_put_command(struct scsi_cmnd *cmd)  	list_del_init(&cmd->list);  	spin_unlock_irqrestore(&cmd->device->list_lock, flags); -	__scsi_put_command(cmd->device->host, cmd, &sdev->sdev_gendev); +	cancel_delayed_work(&cmd->abort_work); + +	__scsi_put_command(cmd->device->host, cmd);  }  EXPORT_SYMBOL(scsi_put_command); -static struct scsi_host_cmd_pool *scsi_get_host_cmd_pool(gfp_t gfp_mask) +static struct scsi_host_cmd_pool * +scsi_find_host_cmd_pool(struct Scsi_Host *shost) +{ +	if (shost->hostt->cmd_size) +		return shost->hostt->cmd_pool; +	if (shost->unchecked_isa_dma) +		return &scsi_cmd_dma_pool; +	return &scsi_cmd_pool; +} + +static void +scsi_free_host_cmd_pool(struct scsi_host_cmd_pool *pool) +{ +	kfree(pool->sense_name); +	kfree(pool->cmd_name); +	kfree(pool); +} + +static struct scsi_host_cmd_pool * +scsi_alloc_host_cmd_pool(struct Scsi_Host *shost) +{ +	struct scsi_host_template *hostt = shost->hostt; +	struct scsi_host_cmd_pool *pool; + +	pool = kzalloc(sizeof(*pool), GFP_KERNEL); +	if (!pool) +		return NULL; + +	pool->cmd_name = kasprintf(GFP_KERNEL, "%s_cmd", hostt->name); +	pool->sense_name = kasprintf(GFP_KERNEL, "%s_sense", hostt->name); +	if (!pool->cmd_name || !pool->sense_name) { +		scsi_free_host_cmd_pool(pool); +		return NULL; +	} + +	pool->slab_flags = SLAB_HWCACHE_ALIGN; +	if (shost->unchecked_isa_dma) { +		pool->slab_flags |= SLAB_CACHE_DMA; +		pool->gfp_mask = __GFP_DMA; +	} +	return pool; +} + +static struct scsi_host_cmd_pool * +scsi_get_host_cmd_pool(struct Scsi_Host *shost)  { +	struct scsi_host_template *hostt = shost->hostt;  	struct scsi_host_cmd_pool *retval = NULL, *pool; +	size_t cmd_size = sizeof(struct scsi_cmnd) + hostt->cmd_size; +  	/*  	 * Select a command slab for this host and create it if not  	 * yet existent.  	 */  	mutex_lock(&host_cmd_pool_mutex); -	pool = (gfp_mask & __GFP_DMA) ? 
&scsi_cmd_dma_pool : -		&scsi_cmd_pool; +	pool = scsi_find_host_cmd_pool(shost); +	if (!pool) { +		pool = scsi_alloc_host_cmd_pool(shost); +		if (!pool) +			goto out; +	} +  	if (!pool->users) { -		pool->cmd_slab = kmem_cache_create(pool->cmd_name, -						   sizeof(struct scsi_cmnd), 0, +		pool->cmd_slab = kmem_cache_create(pool->cmd_name, cmd_size, 0,  						   pool->slab_flags, NULL);  		if (!pool->cmd_slab) -			goto fail; +			goto out_free_pool;  		pool->sense_slab = kmem_cache_create(pool->sense_name,  						     SCSI_SENSE_BUFFERSIZE, 0,  						     pool->slab_flags, NULL); -		if (!pool->sense_slab) { -			kmem_cache_destroy(pool->cmd_slab); -			goto fail; -		} +		if (!pool->sense_slab) +			goto out_free_slab;  	}  	pool->users++;  	retval = pool; - fail: +out:  	mutex_unlock(&host_cmd_pool_mutex);  	return retval; + +out_free_slab: +	kmem_cache_destroy(pool->cmd_slab); +out_free_pool: +	if (hostt->cmd_size) +		scsi_free_host_cmd_pool(pool); +	goto out;  } -static void scsi_put_host_cmd_pool(gfp_t gfp_mask) +static void scsi_put_host_cmd_pool(struct Scsi_Host *shost)  { +	struct scsi_host_template *hostt = shost->hostt;  	struct scsi_host_cmd_pool *pool;  	mutex_lock(&host_cmd_pool_mutex); -	pool = (gfp_mask & __GFP_DMA) ? &scsi_cmd_dma_pool : -		&scsi_cmd_pool; +	pool = scsi_find_host_cmd_pool(shost); +  	/*  	 * This may happen if a driver has a mismatched get and put  	 * of the command pool; the driver should be implicated in @@ -412,67 +447,13 @@ static void scsi_put_host_cmd_pool(gfp_t gfp_mask)  	if (!--pool->users) {  		kmem_cache_destroy(pool->cmd_slab);  		kmem_cache_destroy(pool->sense_slab); +		if (hostt->cmd_size) +			scsi_free_host_cmd_pool(pool);  	}  	mutex_unlock(&host_cmd_pool_mutex);  }  /** - * scsi_allocate_command - get a fully allocated SCSI command - * @gfp_mask:	allocation mask - * - * This function is for use outside of the normal host based pools. - * It allocates the relevant command and takes an additional reference - * on the pool it used.  This function *must* be paired with - * scsi_free_command which also has the identical mask, otherwise the - * free pool counts will eventually go wrong and you'll trigger a bug. - * - * This function should *only* be used by drivers that need a static - * command allocation at start of day for internal functions. - */ -struct scsi_cmnd *scsi_allocate_command(gfp_t gfp_mask) -{ -	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask); - -	if (!pool) -		return NULL; - -	return scsi_pool_alloc_command(pool, gfp_mask); -} -EXPORT_SYMBOL(scsi_allocate_command); - -/** - * scsi_free_command - free a command allocated by scsi_allocate_command - * @gfp_mask:	mask used in the original allocation - * @cmd:	command to free - * - * Note: using the original allocation mask is vital because that's - * what determines which command pool we use to free the command.  Any - * mismatch will cause the system to BUG eventually. - */ -void scsi_free_command(gfp_t gfp_mask, struct scsi_cmnd *cmd) -{ -	struct scsi_host_cmd_pool *pool = scsi_get_host_cmd_pool(gfp_mask); - -	/* -	 * this could trigger if the mask to scsi_allocate_command -	 * doesn't match this mask.  
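The rewritten scsi_get_host_cmd_pool()/scsi_put_host_cmd_pool() above select a pool per host template (creating one on first use when hostt->cmd_size is set) and reference-count its users under host_cmd_pool_mutex. A self-contained userspace sketch of that find-or-create, refcounted-pool pattern; get_pool()/put_pool() and the string key are hypothetical stand-ins for the template-based lookup:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct pool {
	char key[32];
	int users;		/* reference count, guarded by pools_lock */
	struct pool *next;
};

static struct pool *pools;
static pthread_mutex_t pools_lock = PTHREAD_MUTEX_INITIALIZER;

static struct pool *get_pool(const char *key)
{
	struct pool *p;

	pthread_mutex_lock(&pools_lock);
	for (p = pools; p; p = p->next)
		if (!strcmp(p->key, key))
			break;
	if (!p) {		/* first user: create the pool */
		p = calloc(1, sizeof(*p));
		if (!p)
			goto out;
		strncpy(p->key, key, sizeof(p->key) - 1);
		p->next = pools;
		pools = p;
	}
	p->users++;
out:
	pthread_mutex_unlock(&pools_lock);
	return p;
}

static void put_pool(struct pool *p)
{
	pthread_mutex_lock(&pools_lock);
	if (--p->users == 0) {	/* last user: unlink and free */
		struct pool **pp;

		for (pp = &pools; *pp != p; pp = &(*pp)->next)
			;
		*pp = p->next;
		free(p);
	}
	pthread_mutex_unlock(&pools_lock);
}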
Otherwise we're guaranteed that this -	 * succeeds because scsi_allocate_command must have taken a reference -	 * on the pool -	 */ -	BUG_ON(!pool); - -	scsi_pool_free_command(pool, cmd); -	/* -	 * scsi_put_host_cmd_pool is called twice; once to release the -	 * reference we took above, and once to release the reference -	 * originally taken by scsi_allocate_command -	 */ -	scsi_put_host_cmd_pool(gfp_mask); -	scsi_put_host_cmd_pool(gfp_mask); -} -EXPORT_SYMBOL(scsi_free_command); - -/**   * scsi_setup_command_freelist - Setup the command freelist for a scsi host.   * @shost: host to allocate the freelist for.   * @@ -484,14 +465,13 @@ EXPORT_SYMBOL(scsi_free_command);   */  int scsi_setup_command_freelist(struct Scsi_Host *shost)  { -	struct scsi_cmnd *cmd;  	const gfp_t gfp_mask = shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL; +	struct scsi_cmnd *cmd;  	spin_lock_init(&shost->free_list_lock);  	INIT_LIST_HEAD(&shost->free_list); -	shost->cmd_pool = scsi_get_host_cmd_pool(gfp_mask); - +	shost->cmd_pool = scsi_get_host_cmd_pool(shost);  	if (!shost->cmd_pool)  		return -ENOMEM; @@ -500,7 +480,7 @@ int scsi_setup_command_freelist(struct Scsi_Host *shost)  	 */  	cmd = scsi_host_alloc_command(shost, gfp_mask);  	if (!cmd) { -		scsi_put_host_cmd_pool(gfp_mask); +		scsi_put_host_cmd_pool(shost);  		shost->cmd_pool = NULL;  		return -ENOMEM;  	} @@ -526,10 +506,10 @@ void scsi_destroy_command_freelist(struct Scsi_Host *shost)  		cmd = list_entry(shost->free_list.next, struct scsi_cmnd, list);  		list_del_init(&cmd->list); -		scsi_pool_free_command(shost->cmd_pool, cmd); +		scsi_host_free_command(shost, cmd);  	}  	shost->cmd_pool = NULL; -	scsi_put_host_cmd_pool(shost->unchecked_isa_dma ? GFP_DMA : GFP_KERNEL); +	scsi_put_host_cmd_pool(shost);  }  #ifdef CONFIG_SCSI_LOGGING @@ -658,7 +638,6 @@ EXPORT_SYMBOL(scsi_cmd_get_serial);  int scsi_dispatch_cmd(struct scsi_cmnd *cmd)  {  	struct Scsi_Host *host = cmd->device->host; -	unsigned long timeout;  	int rtn = 0;  	atomic_inc(&cmd->device->iorequest_cnt); @@ -704,28 +683,6 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)  			       (cmd->device->lun << 5 & 0xe0);  	} -	/* -	 * We will wait MIN_RESET_DELAY clock ticks after the last reset so -	 * we can avoid the drive not being ready. -	 */ -	timeout = host->last_reset + MIN_RESET_DELAY; - -	if (host->resetting && time_before(jiffies, timeout)) { -		int ticks_remaining = timeout - jiffies; -		/* -		 * NOTE: This may be executed from within an interrupt -		 * handler!  This is bad, but for now, it'll do.  The irq -		 * level of the interrupt handler has been masked out by the -		 * platform dependent interrupt handling code already, so the -		 * sti() here will not cause another call to the SCSI host's -		 * interrupt handler (assuming there is one irq-level per -		 * host). -		 */ -		while (--ticks_remaining >= 0) -			mdelay(1 + 999 / HZ); -		host->resetting = 0; -	} -  	scsi_log_send(cmd);  	/* @@ -770,15 +727,13 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)  }  /** - * scsi_done - Enqueue the finished SCSI command into the done queue. + * scsi_done - Invoke completion on finished SCSI command.   * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives   * ownership back to SCSI Core -- i.e. the LLDD has finished with it.   *   * Description: This function is the mid-level's (SCSI Core) interrupt routine,   * which regains ownership of the SCSI command (de facto) from a LLDD, and - * enqueues the command to the done queue for further processing. 
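Several hunks above preserve the freelist's role as an emergency reserve: scsi_setup_command_freelist() pre-allocates one command, and __scsi_put_command() parks a freed command back on shost->free_list whenever the list is empty, so one command remains available when the slab allocator fails. A hedged, single-threaded sketch of that reserve pattern; struct obj and the helper names are hypothetical:

#include <stdlib.h>

struct obj { struct obj *next; };

static struct obj *reserve;	/* single pre-allocated emergency object */

static int freelist_setup(void)
{
	reserve = malloc(sizeof(*reserve));
	return reserve ? 0 : -1;
}

static struct obj *obj_get(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o) {		/* allocation failed: dip into the reserve */
		o = reserve;
		reserve = NULL;
	}
	return o;		/* may still be NULL if reserve was consumed */
}

static void obj_put(struct obj *o)
{
	if (!reserve)		/* refill the reserve before freeing */
		reserve = o;
	else
		free(o);
}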
- * - * This is the producer of the done queue who enqueues at the tail. + * calls blk_complete_request() for further processing.   *   * This function is interrupt context safe.   */ @@ -981,7 +936,7 @@ EXPORT_SYMBOL(scsi_track_queue_full);   * This is an internal helper function.  You probably want to use   * scsi_get_vpd_page instead.   * - * Returns 0 on success or a negative error number. + * Returns size of the vpd page on success or a negative error number.   */  static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,  							u8 page, unsigned len) @@ -989,6 +944,9 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,  	int result;  	unsigned char cmd[16]; +	if (len < 4) +		return -EINVAL; +  	cmd[0] = INQUIRY;  	cmd[1] = 1;		/* EVPD */  	cmd[2] = page; @@ -1003,13 +961,13 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,  	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,  				  len, NULL, 30 * HZ, 3, NULL);  	if (result) -		return result; +		return -EIO;  	/* Sanity check that we got the page back that we asked for */  	if (buffer[1] != page)  		return -EIO; -	return 0; +	return get_unaligned_be16(&buffer[2]) + 4;  }  /** @@ -1036,18 +994,18 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,  	/* Ask for all the pages supported by this device */  	result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); -	if (result) +	if (result < 4)  		goto fail;  	/* If the user actually wanted this page, we can skip the rest */  	if (page == 0)  		return 0; -	for (i = 0; i < min((int)buf[3], buf_len - 4); i++) -		if (buf[i + 4] == page) +	for (i = 4; i < min(result, buf_len); i++) +		if (buf[i] == page)  			goto found; -	if (i < buf[3] && i >= buf_len - 4) +	if (i < result && i >= buf_len)  		/* ran off the end of the buffer, give us benefit of doubt */  		goto found;  	/* The device claims it doesn't support the requested page */ @@ -1055,7 +1013,7 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,   found:  	result = scsi_vpd_inquiry(sdev, buf, page, buf_len); -	if (result) +	if (result < 0)  		goto fail;  	return 0; @@ -1066,6 +1024,93 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,  EXPORT_SYMBOL_GPL(scsi_get_vpd_page);  /** + * scsi_attach_vpd - Attach Vital Product Data to a SCSI device structure + * @sdev: The device to ask + * + * Attach the 'Device Identification' VPD page (0x83) and the + * 'Unit Serial Number' VPD page (0x80) to a SCSI device + * structure. This information can be used to identify the device + * uniquely. 
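scsi_vpd_inquiry() above now reports the full size of the page held by the device, computed from the EVPD response header, and scsi_attach_vpd() below uses that value to grow its buffer and retry until the whole page fits. A small standalone sketch of the sizing rule; vpd_page_size() is a hypothetical helper, not part of this patch:

#include <stdint.h>
#include <stddef.h>

static int vpd_page_size(const uint8_t *buf, size_t len, uint8_t page)
{
	if (len < 4)
		return -1;	/* response lacks even the 4-byte header */
	if (buf[1] != page)
		return -1;	/* device returned a different page */
	/* bytes 2-3: payload length, big endian; +4 for the header itself */
	return ((buf[2] << 8) | buf[3]) + 4;
}

When this value exceeds the buffer that was passed in, the caller reallocates to the reported size and reissues the inquiry, which is exactly the retry_pg0/retry_pg80/retry_pg83 loop that follows.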
+ */ +void scsi_attach_vpd(struct scsi_device *sdev) +{ +	int result, i; +	int vpd_len = SCSI_VPD_PG_LEN; +	int pg80_supported = 0; +	int pg83_supported = 0; +	unsigned char *vpd_buf; + +	if (sdev->skip_vpd_pages) +		return; +retry_pg0: +	vpd_buf = kmalloc(vpd_len, GFP_KERNEL); +	if (!vpd_buf) +		return; + +	/* Ask for all the pages supported by this device */ +	result = scsi_vpd_inquiry(sdev, vpd_buf, 0, vpd_len); +	if (result < 0) { +		kfree(vpd_buf); +		return; +	} +	if (result > vpd_len) { +		vpd_len = result; +		kfree(vpd_buf); +		goto retry_pg0; +	} + +	for (i = 4; i < result; i++) { +		if (vpd_buf[i] == 0x80) +			pg80_supported = 1; +		if (vpd_buf[i] == 0x83) +			pg83_supported = 1; +	} +	kfree(vpd_buf); +	vpd_len = SCSI_VPD_PG_LEN; + +	if (pg80_supported) { +retry_pg80: +		vpd_buf = kmalloc(vpd_len, GFP_KERNEL); +		if (!vpd_buf) +			return; + +		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x80, vpd_len); +		if (result < 0) { +			kfree(vpd_buf); +			return; +		} +		if (result > vpd_len) { +			vpd_len = result; +			kfree(vpd_buf); +			goto retry_pg80; +		} +		sdev->vpd_pg80_len = result; +		sdev->vpd_pg80 = vpd_buf; +		vpd_len = SCSI_VPD_PG_LEN; +	} + +	if (pg83_supported) { +retry_pg83: +		vpd_buf = kmalloc(vpd_len, GFP_KERNEL); +		if (!vpd_buf) +			return; + +		result = scsi_vpd_inquiry(sdev, vpd_buf, 0x83, vpd_len); +		if (result < 0) { +			kfree(vpd_buf); +			return; +		} +		if (result > vpd_len) { +			vpd_len = result; +			kfree(vpd_buf); +			goto retry_pg83; +		} +		sdev->vpd_pg83_len = result; +		sdev->vpd_pg83 = vpd_buf; +	} +} + +/**   * scsi_report_opcode - Find out if a given command opcode is supported   * @sdev:	scsi device to query   * @buffer:	scratch buffer (must be at least 20 bytes long) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 01c0ffa3127..1328a262107 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -64,6 +64,7 @@ static const char * scsi_debug_version_date = "20100324";  /* Additional Sense Code (ASC) */  #define NO_ADDITIONAL_SENSE 0x0  #define LOGICAL_UNIT_NOT_READY 0x4 +#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8  #define UNRECOVERED_READ_ERR 0x11  #define PARAMETER_LIST_LENGTH_ERR 0x1a  #define INVALID_OPCODE 0x20 @@ -129,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324";  #define SCSI_DEBUG_OPT_DIF_ERR   32  #define SCSI_DEBUG_OPT_DIX_ERR   64  #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128 +#define SCSI_DEBUG_OPT_SHORT_TRANSFER	256  /* When "every_nth" > 0 then modulo "every_nth" commands:   *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set   *   - a RECOVERED_ERROR is simulated on successful read and write @@ -169,7 +171,7 @@ static int scsi_debug_dix = DEF_DIX;  static int scsi_debug_dsense = DEF_D_SENSE;  static int scsi_debug_every_nth = DEF_EVERY_NTH;  static int scsi_debug_fake_rw = DEF_FAKE_RW; -static int scsi_debug_guard = DEF_GUARD; +static unsigned int scsi_debug_guard = DEF_GUARD;  static int scsi_debug_lowest_aligned = DEF_LOWEST_ALIGNED;  static int scsi_debug_max_luns = DEF_MAX_LUNS;  static int scsi_debug_max_queue = SCSI_DEBUG_CANQUEUE; @@ -195,6 +197,7 @@ static unsigned int scsi_debug_unmap_max_blocks = DEF_UNMAP_MAX_BLOCKS;  static unsigned int scsi_debug_unmap_max_desc = DEF_UNMAP_MAX_DESC;  static unsigned int scsi_debug_write_same_length = DEF_WRITESAME_LENGTH;  static bool scsi_debug_removable = DEF_REMOVABLE; +static bool scsi_debug_clustering;  static int scsi_debug_cmnd_count = 0; @@ -293,6 +296,20 @@ static unsigned char ctrl_m_pg[] = {0xa, 10, 2, 
0, 0, 0, 0, 0,  static unsigned char iec_m_pg[] = {0x1c, 0xa, 0x08, 0, 0, 0, 0, 0,  			           0, 0, 0x0, 0x0}; +static void *fake_store(unsigned long long lba) +{ +	lba = do_div(lba, sdebug_store_sectors); + +	return fake_storep + lba * scsi_debug_sector_size; +} + +static struct sd_dif_tuple *dif_store(sector_t sector) +{ +	sector = do_div(sector, sdebug_store_sectors); + +	return dif_storep + sector; +} +  static int sdebug_add_adapter(void);  static void sdebug_remove_adapter(void); @@ -1731,25 +1748,22 @@ static int do_device_access(struct scsi_cmnd *scmd,  	return ret;  } -static u16 dif_compute_csum(const void *buf, int len) +static __be16 dif_compute_csum(const void *buf, int len)  { -	u16 csum; +	__be16 csum; -	switch (scsi_debug_guard) { -	case 1: -		csum = ip_compute_csum(buf, len); -		break; -	case 0: +	if (scsi_debug_guard) +		csum = (__force __be16)ip_compute_csum(buf, len); +	else  		csum = cpu_to_be16(crc_t10dif(buf, len)); -		break; -	} +  	return csum;  }  static int dif_verify(struct sd_dif_tuple *sdt, const void *data,  		      sector_t sector, u32 ei_lba)  { -	u16 csum = dif_compute_csum(data, scsi_debug_sector_size); +	__be16 csum = dif_compute_csum(data, scsi_debug_sector_size);  	if (sdt->guard_tag != csum) {  		pr_err("%s: GUARD check failed on sector %lu rcvd 0x%04x, data 0x%04x\n", @@ -1769,65 +1783,78 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,  	    be32_to_cpu(sdt->ref_tag) != ei_lba) {  		pr_err("%s: REF check failed on sector %lu\n",  			__func__, (unsigned long)sector); -			dif_errors++;  		return 0x03;  	}  	return 0;  } -static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, -			    unsigned int sectors, u32 ei_lba) +static void dif_copy_prot(struct scsi_cmnd *SCpnt, sector_t sector, +			  unsigned int sectors, bool read)  { -	unsigned int i, resid; -	struct scatterlist *psgl; -	struct sd_dif_tuple *sdt; -	sector_t sector; -	sector_t tmp_sec = start_sec; +	size_t resid;  	void *paddr; +	const void *dif_store_end = dif_storep + sdebug_store_sectors; +	struct sg_mapping_iter miter; -	start_sec = do_div(tmp_sec, sdebug_store_sectors); +	/* Bytes of protection data to copy into sgl */ +	resid = sectors * sizeof(*dif_storep); -	sdt = dif_storep + start_sec; +	sg_miter_start(&miter, scsi_prot_sglist(SCpnt), +			scsi_prot_sg_count(SCpnt), SG_MITER_ATOMIC | +			(read ? 
SG_MITER_TO_SG : SG_MITER_FROM_SG)); -	for (i = 0 ; i < sectors ; i++) { -		int ret; +	while (sg_miter_next(&miter) && resid > 0) { +		size_t len = min(miter.length, resid); +		void *start = dif_store(sector); +		size_t rest = 0; -		if (sdt[i].app_tag == 0xffff) -			continue; +		if (dif_store_end < start + len) +			rest = start + len - dif_store_end; -		sector = start_sec + i; +		paddr = miter.addr; -		ret = dif_verify(&sdt[i], -				 fake_storep + sector * scsi_debug_sector_size, -				 sector, ei_lba); -		if (ret) { -			dif_errors++; -			return ret; +		if (read) +			memcpy(paddr, start, len - rest); +		else +			memcpy(start, paddr, len - rest); + +		if (rest) { +			if (read) +				memcpy(paddr + len - rest, dif_storep, rest); +			else +				memcpy(dif_storep, paddr + len - rest, rest);  		} -		ei_lba++; +		sector += len / sizeof(*dif_storep); +		resid -= len;  	} +	sg_miter_stop(&miter); +} -	/* Bytes of protection data to copy into sgl */ -	resid = sectors * sizeof(*dif_storep); -	sector = start_sec; +static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, +			    unsigned int sectors, u32 ei_lba) +{ +	unsigned int i; +	struct sd_dif_tuple *sdt; +	sector_t sector; -	scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { -		int len = min(psgl->length, resid); +	for (i = 0; i < sectors; i++, ei_lba++) { +		int ret; -		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; -		memcpy(paddr, dif_storep + sector, len); +		sector = start_sec + i; +		sdt = dif_store(sector); -		sector += len / sizeof(*dif_storep); -		if (sector >= sdebug_store_sectors) { -			/* Force wrap */ -			tmp_sec = sector; -			sector = do_div(tmp_sec, sdebug_store_sectors); +		if (sdt->app_tag == cpu_to_be16(0xffff)) +			continue; + +		ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); +		if (ret) { +			dif_errors++; +			return ret;  		} -		resid -= len; -		kunmap_atomic(paddr);  	} +	dif_copy_prot(SCpnt, start_sec, sectors, true);  	dix_reads++;  	return 0; @@ -1863,17 +1890,19 @@ static int resp_read(struct scsi_cmnd *SCpnt, unsigned long long lba,  		return check_condition_result;  	} +	read_lock_irqsave(&atomic_rw, iflags); +  	/* DIX + T10 DIF */  	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {  		int prot_ret = prot_verify_read(SCpnt, lba, num, ei_lba);  		if (prot_ret) { +			read_unlock_irqrestore(&atomic_rw, iflags);  			mk_sense_buffer(devip, ABORTED_COMMAND, 0x10, prot_ret);  			return illegal_condition_result;  		}  	} -	read_lock_irqsave(&atomic_rw, iflags);  	ret = do_device_access(SCpnt, devip, lba, num, 0);  	read_unlock_irqrestore(&atomic_rw, iflags);  	if (ret == -1) @@ -1908,79 +1937,72 @@ void dump_sector(unsigned char *buf, int len)  static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,  			     unsigned int sectors, u32 ei_lba)  { -	int i, j, ret; +	int ret;  	struct sd_dif_tuple *sdt; -	struct scatterlist *dsgl = scsi_sglist(SCpnt); -	struct scatterlist *psgl = scsi_prot_sglist(SCpnt); -	void *daddr, *paddr; -	sector_t tmp_sec = start_sec; -	sector_t sector; +	void *daddr; +	sector_t sector = start_sec;  	int ppage_offset; - -	sector = do_div(tmp_sec, sdebug_store_sectors); +	int dpage_offset; +	struct sg_mapping_iter diter; +	struct sg_mapping_iter piter;  	BUG_ON(scsi_sg_count(SCpnt) == 0);  	BUG_ON(scsi_prot_sg_count(SCpnt) == 0); -	ppage_offset = 0; - -	/* For each data page */ -	scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) { -		daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset; -		paddr = kmap_atomic(sg_page(psgl)) + psgl->offset; - 
-		/* For each sector-sized chunk in data page */ -		for (j = 0; j < dsgl->length; j += scsi_debug_sector_size) { +	sg_miter_start(&piter, scsi_prot_sglist(SCpnt), +			scsi_prot_sg_count(SCpnt), +			SG_MITER_ATOMIC | SG_MITER_FROM_SG); +	sg_miter_start(&diter, scsi_sglist(SCpnt), scsi_sg_count(SCpnt), +			SG_MITER_ATOMIC | SG_MITER_FROM_SG); + +	/* For each protection page */ +	while (sg_miter_next(&piter)) { +		dpage_offset = 0; +		if (WARN_ON(!sg_miter_next(&diter))) { +			ret = 0x01; +			goto out; +		} +		for (ppage_offset = 0; ppage_offset < piter.length; +		     ppage_offset += sizeof(struct sd_dif_tuple)) {  			/* If we're at the end of the current -			 * protection page advance to the next one +			 * data page advance to the next one  			 */ -			if (ppage_offset >= psgl->length) { -				kunmap_atomic(paddr); -				psgl = sg_next(psgl); -				BUG_ON(psgl == NULL); -				paddr = kmap_atomic(sg_page(psgl)) -					+ psgl->offset; -				ppage_offset = 0; +			if (dpage_offset >= diter.length) { +				if (WARN_ON(!sg_miter_next(&diter))) { +					ret = 0x01; +					goto out; +				} +				dpage_offset = 0;  			} -			sdt = paddr + ppage_offset; +			sdt = piter.addr + ppage_offset; +			daddr = diter.addr + dpage_offset; -			ret = dif_verify(sdt, daddr + j, start_sec, ei_lba); +			ret = dif_verify(sdt, daddr, sector, ei_lba);  			if (ret) { -				dump_sector(daddr + j, scsi_debug_sector_size); +				dump_sector(daddr, scsi_debug_sector_size);  				goto out;  			} -			/* Would be great to copy this in bigger -			 * chunks.  However, for the sake of -			 * correctness we need to verify each sector -			 * before writing it to "stable" storage -			 */ -			memcpy(dif_storep + sector, sdt, sizeof(*sdt)); -  			sector++; - -			if (sector == sdebug_store_sectors) -				sector = 0;	/* Force wrap */ - -			start_sec++;  			ei_lba++; -			ppage_offset += sizeof(struct sd_dif_tuple); +			dpage_offset += scsi_debug_sector_size;  		} - -		kunmap_atomic(paddr); -		kunmap_atomic(daddr); +		diter.consumed = dpage_offset; +		sg_miter_stop(&diter);  	} +	sg_miter_stop(&piter); +	dif_copy_prot(SCpnt, start_sec, sectors, false);  	dix_writes++;  	return 0;  out:  	dif_errors++; -	kunmap_atomic(paddr); -	kunmap_atomic(daddr); +	sg_miter_stop(&diter); +	sg_miter_stop(&piter);  	return ret;  } @@ -2080,17 +2102,19 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long long lba,  	if (ret)  		return ret; +	write_lock_irqsave(&atomic_rw, iflags); +  	/* DIX + T10 DIF */  	if (scsi_debug_dix && scsi_prot_sg_count(SCpnt)) {  		int prot_ret = prot_verify_write(SCpnt, lba, num, ei_lba);  		if (prot_ret) { +			write_unlock_irqrestore(&atomic_rw, iflags);  			mk_sense_buffer(devip, ILLEGAL_REQUEST, 0x10, prot_ret);  			return illegal_condition_result;  		}  	} -	write_lock_irqsave(&atomic_rw, iflags);  	ret = do_device_access(SCpnt, devip, lba, num, 1);  	if (scsi_debug_lbp())  		map_region(lba, num); @@ -2169,6 +2193,7 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)  	struct unmap_block_desc *desc;  	unsigned int i, payload_len, descriptors;  	int ret; +	unsigned long iflags;  	ret = check_readiness(scmd, 1, devip);  	if (ret) @@ -2190,6 +2215,8 @@ static int resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)  	desc = (void *)&buf[8]; +	write_lock_irqsave(&atomic_rw, iflags); +  	for (i = 0 ; i < descriptors ; i++) {  		unsigned long long lba = get_unaligned_be64(&desc[i].lba);  		unsigned int num = get_unaligned_be32(&desc[i].blocks); @@ -2204,6 +2231,7 @@ static int 
resp_unmap(struct scsi_cmnd * scmd, struct sdebug_dev_info * devip)  	ret = 0;  out: +	write_unlock_irqrestore(&atomic_rw, iflags);  	kfree(buf);  	return ret; @@ -2304,36 +2332,37 @@ static int resp_report_luns(struct scsi_cmnd * scp,  static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,  			    unsigned int num, struct sdebug_dev_info *devip)  { -	int i, j, ret = -1; +	int j;  	unsigned char *kaddr, *buf;  	unsigned int offset; -	struct scatterlist *sg;  	struct scsi_data_buffer *sdb = scsi_in(scp); +	struct sg_mapping_iter miter;  	/* better not to use temporary buffer. */  	buf = kmalloc(scsi_bufflen(scp), GFP_ATOMIC); -	if (!buf) -		return ret; +	if (!buf) { +		mk_sense_buffer(devip, NOT_READY, +				LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); +		return check_condition_result; +	}  	scsi_sg_copy_to_buffer(scp, buf, scsi_bufflen(scp));  	offset = 0; -	for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { -		kaddr = (unsigned char *)kmap_atomic(sg_page(sg)); -		if (!kaddr) -			goto out; +	sg_miter_start(&miter, sdb->table.sgl, sdb->table.nents, +			SG_MITER_ATOMIC | SG_MITER_TO_SG); -		for (j = 0; j < sg->length; j++) -			*(kaddr + sg->offset + j) ^= *(buf + offset + j); +	while (sg_miter_next(&miter)) { +		kaddr = miter.addr; +		for (j = 0; j < miter.length; j++) +			*(kaddr + j) ^= *(buf + offset + j); -		offset += sg->length; -		kunmap_atomic(kaddr); +		offset += miter.length;  	} -	ret = 0; -out: +	sg_miter_stop(&miter);  	kfree(buf); -	return ret; +	return 0;  }  /* When timer goes off this function is called. */ @@ -2735,6 +2764,7 @@ static int schedule_resp(struct scsi_cmnd * cmnd,   */  module_param_named(add_host, scsi_debug_add_host, int, S_IRUGO | S_IWUSR);  module_param_named(ato, scsi_debug_ato, int, S_IRUGO); +module_param_named(clustering, scsi_debug_clustering, bool, S_IRUGO | S_IWUSR);  module_param_named(delay, scsi_debug_delay, int, S_IRUGO | S_IWUSR);  module_param_named(dev_size_mb, scsi_debug_dev_size_mb, int, S_IRUGO);  module_param_named(dif, scsi_debug_dif, int, S_IRUGO); @@ -2742,7 +2772,7 @@ module_param_named(dix, scsi_debug_dix, int, S_IRUGO);  module_param_named(dsense, scsi_debug_dsense, int, S_IRUGO | S_IWUSR);  module_param_named(every_nth, scsi_debug_every_nth, int, S_IRUGO | S_IWUSR);  module_param_named(fake_rw, scsi_debug_fake_rw, int, S_IRUGO | S_IWUSR); -module_param_named(guard, scsi_debug_guard, int, S_IRUGO); +module_param_named(guard, scsi_debug_guard, uint, S_IRUGO);  module_param_named(lbpu, scsi_debug_lbpu, int, S_IRUGO);  module_param_named(lbpws, scsi_debug_lbpws, int, S_IRUGO);  module_param_named(lbpws10, scsi_debug_lbpws10, int, S_IRUGO); @@ -2778,6 +2808,7 @@ MODULE_VERSION(SCSI_DEBUG_VERSION);  MODULE_PARM_DESC(add_host, "0..127 hosts allowed(def=1)");  MODULE_PARM_DESC(ato, "application tag ownership: 0=disk 1=host (def=1)"); +MODULE_PARM_DESC(clustering, "when set enables larger transfers (def=0)");  MODULE_PARM_DESC(delay, "# of jiffies to delay response(def=1)");  MODULE_PARM_DESC(dev_size_mb, "size in MB of ram shared by devs(def=8)");  MODULE_PARM_DESC(dif, "data integrity field type: 0-3 (def=0)"); @@ -2864,13 +2895,13 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)  	return 0;  } -static ssize_t sdebug_delay_show(struct device_driver * ddp, char * buf) +static ssize_t delay_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_delay);  } -static ssize_t sdebug_delay_store(struct device_driver * ddp, -				  const char * buf, 
size_t count) +static ssize_t delay_store(struct device_driver *ddp, const char *buf, +			   size_t count)  {          int delay;  	char work[20]; @@ -2883,16 +2914,15 @@ static ssize_t sdebug_delay_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(delay, S_IRUGO | S_IWUSR, sdebug_delay_show, -	    sdebug_delay_store); +static DRIVER_ATTR_RW(delay); -static ssize_t sdebug_opts_show(struct device_driver * ddp, char * buf) +static ssize_t opts_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "0x%x\n", scsi_debug_opts);  } -static ssize_t sdebug_opts_store(struct device_driver * ddp, -				 const char * buf, size_t count) +static ssize_t opts_store(struct device_driver *ddp, const char *buf, +			  size_t count)  {          int opts;  	char work[20]; @@ -2912,15 +2942,14 @@ opts_done:  	scsi_debug_cmnd_count = 0;  	return count;  } -DRIVER_ATTR(opts, S_IRUGO | S_IWUSR, sdebug_opts_show, -	    sdebug_opts_store); +static DRIVER_ATTR_RW(opts); -static ssize_t sdebug_ptype_show(struct device_driver * ddp, char * buf) +static ssize_t ptype_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ptype);  } -static ssize_t sdebug_ptype_store(struct device_driver * ddp, -				  const char * buf, size_t count) +static ssize_t ptype_store(struct device_driver *ddp, const char *buf, +			   size_t count)  {          int n; @@ -2930,14 +2959,14 @@ static ssize_t sdebug_ptype_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(ptype, S_IRUGO | S_IWUSR, sdebug_ptype_show, sdebug_ptype_store); +static DRIVER_ATTR_RW(ptype); -static ssize_t sdebug_dsense_show(struct device_driver * ddp, char * buf) +static ssize_t dsense_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dsense);  } -static ssize_t sdebug_dsense_store(struct device_driver * ddp, -				  const char * buf, size_t count) +static ssize_t dsense_store(struct device_driver *ddp, const char *buf, +			    size_t count)  {          int n; @@ -2947,15 +2976,14 @@ static ssize_t sdebug_dsense_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(dsense, S_IRUGO | S_IWUSR, sdebug_dsense_show, -	    sdebug_dsense_store); +static DRIVER_ATTR_RW(dsense); -static ssize_t sdebug_fake_rw_show(struct device_driver * ddp, char * buf) +static ssize_t fake_rw_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_fake_rw);  } -static ssize_t sdebug_fake_rw_store(struct device_driver * ddp, -				    const char * buf, size_t count) +static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, +			     size_t count)  {          int n; @@ -2965,15 +2993,14 @@ static ssize_t sdebug_fake_rw_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(fake_rw, S_IRUGO | S_IWUSR, sdebug_fake_rw_show, -	    sdebug_fake_rw_store); +static DRIVER_ATTR_RW(fake_rw); -static ssize_t sdebug_no_lun_0_show(struct device_driver * ddp, char * buf) +static ssize_t no_lun_0_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_lun_0);  } -static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp, -				     const char * buf, size_t count) +static ssize_t no_lun_0_store(struct device_driver *ddp, const char *buf, +			      size_t count)  {          int n; @@ -2983,15 +3010,14 @@ static ssize_t sdebug_no_lun_0_store(struct device_driver * ddp,  	}  	
return -EINVAL;  } -DRIVER_ATTR(no_lun_0, S_IRUGO | S_IWUSR, sdebug_no_lun_0_show, -	    sdebug_no_lun_0_store); +static DRIVER_ATTR_RW(no_lun_0); -static ssize_t sdebug_num_tgts_show(struct device_driver * ddp, char * buf) +static ssize_t num_tgts_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_tgts);  } -static ssize_t sdebug_num_tgts_store(struct device_driver * ddp, -				     const char * buf, size_t count) +static ssize_t num_tgts_store(struct device_driver *ddp, const char *buf, +			      size_t count)  {          int n; @@ -3002,27 +3028,26 @@ static ssize_t sdebug_num_tgts_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(num_tgts, S_IRUGO | S_IWUSR, sdebug_num_tgts_show, -	    sdebug_num_tgts_store); +static DRIVER_ATTR_RW(num_tgts); -static ssize_t sdebug_dev_size_mb_show(struct device_driver * ddp, char * buf) +static ssize_t dev_size_mb_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dev_size_mb);  } -DRIVER_ATTR(dev_size_mb, S_IRUGO, sdebug_dev_size_mb_show, NULL); +static DRIVER_ATTR_RO(dev_size_mb); -static ssize_t sdebug_num_parts_show(struct device_driver * ddp, char * buf) +static ssize_t num_parts_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_num_parts);  } -DRIVER_ATTR(num_parts, S_IRUGO, sdebug_num_parts_show, NULL); +static DRIVER_ATTR_RO(num_parts); -static ssize_t sdebug_every_nth_show(struct device_driver * ddp, char * buf) +static ssize_t every_nth_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_every_nth);  } -static ssize_t sdebug_every_nth_store(struct device_driver * ddp, -				      const char * buf, size_t count) +static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, +			       size_t count)  {          int nth; @@ -3033,15 +3058,14 @@ static ssize_t sdebug_every_nth_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(every_nth, S_IRUGO | S_IWUSR, sdebug_every_nth_show, -	    sdebug_every_nth_store); +static DRIVER_ATTR_RW(every_nth); -static ssize_t sdebug_max_luns_show(struct device_driver * ddp, char * buf) +static ssize_t max_luns_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_luns);  } -static ssize_t sdebug_max_luns_store(struct device_driver * ddp, -				     const char * buf, size_t count) +static ssize_t max_luns_store(struct device_driver *ddp, const char *buf, +			      size_t count)  {          int n; @@ -3052,15 +3076,14 @@ static ssize_t sdebug_max_luns_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(max_luns, S_IRUGO | S_IWUSR, sdebug_max_luns_show, -	    sdebug_max_luns_store); +static DRIVER_ATTR_RW(max_luns); -static ssize_t sdebug_max_queue_show(struct device_driver * ddp, char * buf) +static ssize_t max_queue_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_max_queue);  } -static ssize_t sdebug_max_queue_store(struct device_driver * ddp, -				      const char * buf, size_t count) +static ssize_t max_queue_store(struct device_driver *ddp, const char *buf, +			       size_t count)  {          int n; @@ -3071,27 +3094,26 @@ static ssize_t sdebug_max_queue_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(max_queue, S_IRUGO | S_IWUSR, sdebug_max_queue_show, -	    
sdebug_max_queue_store); +static DRIVER_ATTR_RW(max_queue); -static ssize_t sdebug_no_uld_show(struct device_driver * ddp, char * buf) +static ssize_t no_uld_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_no_uld);  } -DRIVER_ATTR(no_uld, S_IRUGO, sdebug_no_uld_show, NULL); +static DRIVER_ATTR_RO(no_uld); -static ssize_t sdebug_scsi_level_show(struct device_driver * ddp, char * buf) +static ssize_t scsi_level_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_scsi_level);  } -DRIVER_ATTR(scsi_level, S_IRUGO, sdebug_scsi_level_show, NULL); +static DRIVER_ATTR_RO(scsi_level); -static ssize_t sdebug_virtual_gb_show(struct device_driver * ddp, char * buf) +static ssize_t virtual_gb_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_virtual_gb);  } -static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp, -				       const char * buf, size_t count) +static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf, +				size_t count)  {          int n; @@ -3104,16 +3126,15 @@ static ssize_t sdebug_virtual_gb_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(virtual_gb, S_IRUGO | S_IWUSR, sdebug_virtual_gb_show, -	    sdebug_virtual_gb_store); +static DRIVER_ATTR_RW(virtual_gb); -static ssize_t sdebug_add_host_show(struct device_driver * ddp, char * buf) +static ssize_t add_host_show(struct device_driver *ddp, char *buf)  {          return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_add_host);  } -static ssize_t sdebug_add_host_store(struct device_driver * ddp, -				     const char * buf, size_t count) +static ssize_t add_host_store(struct device_driver *ddp, const char *buf, +			      size_t count)  {  	int delta_hosts; @@ -3130,16 +3151,14 @@ static ssize_t sdebug_add_host_store(struct device_driver * ddp,  	}  	return count;  } -DRIVER_ATTR(add_host, S_IRUGO | S_IWUSR, sdebug_add_host_show, -	    sdebug_add_host_store); +static DRIVER_ATTR_RW(add_host); -static ssize_t sdebug_vpd_use_hostno_show(struct device_driver * ddp, -					  char * buf) +static ssize_t vpd_use_hostno_show(struct device_driver *ddp, char *buf)  {  	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_vpd_use_hostno);  } -static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp, -					   const char * buf, size_t count) +static ssize_t vpd_use_hostno_store(struct device_driver *ddp, const char *buf, +				    size_t count)  {  	int n; @@ -3149,40 +3168,39 @@ static ssize_t sdebug_vpd_use_hostno_store(struct device_driver * ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(vpd_use_hostno, S_IRUGO | S_IWUSR, sdebug_vpd_use_hostno_show, -	    sdebug_vpd_use_hostno_store); +static DRIVER_ATTR_RW(vpd_use_hostno); -static ssize_t sdebug_sector_size_show(struct device_driver * ddp, char * buf) +static ssize_t sector_size_show(struct device_driver *ddp, char *buf)  {  	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_sector_size);  } -DRIVER_ATTR(sector_size, S_IRUGO, sdebug_sector_size_show, NULL); +static DRIVER_ATTR_RO(sector_size); -static ssize_t sdebug_dix_show(struct device_driver *ddp, char *buf) +static ssize_t dix_show(struct device_driver *ddp, char *buf)  {  	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dix);  } -DRIVER_ATTR(dix, S_IRUGO, sdebug_dix_show, NULL); +static DRIVER_ATTR_RO(dix); -static ssize_t sdebug_dif_show(struct device_driver *ddp, char *buf) +static ssize_t dif_show(struct 
device_driver *ddp, char *buf)  {  	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_dif);  } -DRIVER_ATTR(dif, S_IRUGO, sdebug_dif_show, NULL); +static DRIVER_ATTR_RO(dif); -static ssize_t sdebug_guard_show(struct device_driver *ddp, char *buf) +static ssize_t guard_show(struct device_driver *ddp, char *buf)  { -	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_guard); +	return scnprintf(buf, PAGE_SIZE, "%u\n", scsi_debug_guard);  } -DRIVER_ATTR(guard, S_IRUGO, sdebug_guard_show, NULL); +static DRIVER_ATTR_RO(guard); -static ssize_t sdebug_ato_show(struct device_driver *ddp, char *buf) +static ssize_t ato_show(struct device_driver *ddp, char *buf)  {  	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_ato);  } -DRIVER_ATTR(ato, S_IRUGO, sdebug_ato_show, NULL); +static DRIVER_ATTR_RO(ato); -static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf) +static ssize_t map_show(struct device_driver *ddp, char *buf)  {  	ssize_t count; @@ -3197,15 +3215,14 @@ static ssize_t sdebug_map_show(struct device_driver *ddp, char *buf)  	return count;  } -DRIVER_ATTR(map, S_IRUGO, sdebug_map_show, NULL); +static DRIVER_ATTR_RO(map); -static ssize_t sdebug_removable_show(struct device_driver *ddp, -				     char *buf) +static ssize_t removable_show(struct device_driver *ddp, char *buf)  {  	return scnprintf(buf, PAGE_SIZE, "%d\n", scsi_debug_removable ? 1 : 0);  } -static ssize_t sdebug_removable_store(struct device_driver *ddp, -				      const char *buf, size_t count) +static ssize_t removable_store(struct device_driver *ddp, const char *buf, +			       size_t count)  {  	int n; @@ -3215,76 +3232,45 @@ static ssize_t sdebug_removable_store(struct device_driver *ddp,  	}  	return -EINVAL;  } -DRIVER_ATTR(removable, S_IRUGO | S_IWUSR, sdebug_removable_show, -	    sdebug_removable_store); - +static DRIVER_ATTR_RW(removable); -/* Note: The following function creates attribute files in the +/* Note: The following array creates attribute files in the     /sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these     files (over those found in the /sys/module/scsi_debug/parameters     directory) is that auxiliary actions can be triggered when an attribute     is changed. For example see: sdebug_add_host_store() above.   
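The wholesale conversion above leans on the naming contract of DRIVER_ATTR_RW()/DRIVER_ATTR_RO(): the macro emits driver_attr_<name> wired to <name>_show() and, for RW, <name>_store(), and ATTRIBUTE_GROUPS() then packages the attribute array for the driver core. A minimal sketch with a hypothetical attribute called example:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static int example_value;	/* hypothetical backing variable */

/* DRIVER_ATTR_RW(example) below requires these exact function names */
static ssize_t example_show(struct device_driver *ddp, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", example_value);
}

static ssize_t example_store(struct device_driver *ddp, const char *buf,
			     size_t count)
{
	int n;

	if (kstrtoint(buf, 10, &n))
		return -EINVAL;
	example_value = n;
	return count;
}
static DRIVER_ATTR_RW(example);	/* emits struct driver_attribute driver_attr_example */

static struct attribute *example_drv_attrs[] = {
	&driver_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_drv);	/* emits example_drv_groups for bus_type.drv_groups */

Hooking the generated groups array (sdebug_drv_groups in this patch) into bus_type.drv_groups lets the driver core create and remove every attribute file around driver registration, which is why do_create_driverfs_files() and do_remove_driverfs_files() can be deleted wholesale below.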
*/ -static int do_create_driverfs_files(void) -{ -	int ret; -	ret = driver_create_file(&sdebug_driverfs_driver, &driver_attr_add_host); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_delay); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dsense); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_every_nth); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_luns); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_max_queue); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_no_uld); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_parts); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ptype); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_opts); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_removable); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_sector_size); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dix); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_dif); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_guard); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_ato); -	ret |= driver_create_file(&sdebug_driverfs_driver, &driver_attr_map); -	return ret; -} +static struct attribute *sdebug_drv_attrs[] = { +	&driver_attr_delay.attr, +	&driver_attr_opts.attr, +	&driver_attr_ptype.attr, +	&driver_attr_dsense.attr, +	&driver_attr_fake_rw.attr, +	&driver_attr_no_lun_0.attr, +	&driver_attr_num_tgts.attr, +	&driver_attr_dev_size_mb.attr, +	&driver_attr_num_parts.attr, +	&driver_attr_every_nth.attr, +	&driver_attr_max_luns.attr, +	&driver_attr_max_queue.attr, +	&driver_attr_no_uld.attr, +	&driver_attr_scsi_level.attr, +	&driver_attr_virtual_gb.attr, +	&driver_attr_add_host.attr, +	&driver_attr_vpd_use_hostno.attr, +	&driver_attr_sector_size.attr, +	&driver_attr_dix.attr, +	&driver_attr_dif.attr, +	&driver_attr_guard.attr, +	&driver_attr_ato.attr, +	&driver_attr_map.attr, +	&driver_attr_removable.attr, +	NULL, +}; +ATTRIBUTE_GROUPS(sdebug_drv); -static void do_remove_driverfs_files(void) -{ -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_map); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ato); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_guard); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dif); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dix); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_sector_size); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_vpd_use_hostno); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_virtual_gb); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_scsi_level); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_opts); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_ptype); -	driver_remove_file(&sdebug_driverfs_driver, 
&driver_attr_removable); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_tgts); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_num_parts); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_uld); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_no_lun_0); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_queue); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_max_luns); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_fake_rw); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_every_nth); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dsense); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_dev_size_mb); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_delay); -	driver_remove_file(&sdebug_driverfs_driver, &driver_attr_add_host); -} - -struct device *pseudo_primary; +static struct device *pseudo_primary;  static int __init scsi_debug_init(void)  { @@ -3447,12 +3433,6 @@ static int __init scsi_debug_init(void)  			ret);  		goto bus_unreg;  	} -	ret = do_create_driverfs_files(); -	if (ret < 0) { -		printk(KERN_WARNING "scsi_debug: driver_create_file error: %d\n", -			ret); -		goto del_files; -	}  	init_all_queued(); @@ -3473,9 +3453,6 @@ static int __init scsi_debug_init(void)  	}  	return 0; -del_files: -	do_remove_driverfs_files(); -	driver_unregister(&sdebug_driverfs_driver);  bus_unreg:  	bus_unregister(&pseudo_lld_bus);  dev_unreg: @@ -3497,7 +3474,6 @@ static void __exit scsi_debug_exit(void)  	stop_all_queued();  	for (; k; k--)  		sdebug_remove_adapter(); -	do_remove_driverfs_files();  	driver_unregister(&sdebug_driverfs_driver);  	bus_unregister(&pseudo_lld_bus);  	root_device_unregister(pseudo_primary); @@ -3608,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)  	int inj_transport = 0;  	int inj_dif = 0;  	int inj_dix = 0; +	int inj_short = 0;  	int delay_override = 0;  	int unmap = 0; @@ -3653,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)  			inj_dif = 1; /* to reads and writes below */  		else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)  			inj_dix = 1; /* to reads and writes below */ +		else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts) +			inj_short = 1;  	}  	if (devip->wlun) { @@ -3769,6 +3748,10 @@ read:  		if (scsi_debug_fake_rw)  			break;  		get_data_transfer_info(cmd, &lba, &num, &ei_lba); + +		if (inj_short) +			num /= 2; +  		errsts = resp_read(SCpnt, lba, num, devip, ei_lba);  		if (inj_recovered && (0 == errsts)) {  			mk_sense_buffer(devip, RECOVERED_ERROR, @@ -3980,6 +3963,8 @@ static int sdebug_driver_probe(struct device * dev)  	sdbg_host = to_sdebug_host(dev);  	sdebug_driver_template.can_queue = scsi_debug_max_queue; +	if (scsi_debug_clustering) +		sdebug_driver_template.use_clustering = ENABLE_CLUSTERING;  	hpnt = scsi_host_alloc(&sdebug_driver_template, sizeof(sdbg_host));  	if (NULL == hpnt) {  		printk(KERN_ERR "%s: scsi_register failed\n", __func__); @@ -4087,4 +4072,5 @@ static struct bus_type pseudo_lld_bus = {  	.match = pseudo_lld_bus_match,  	.probe = sdebug_driver_probe,  	.remove = sdebug_driver_remove, +	.drv_groups = sdebug_drv_groups,  }; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 83e591b6019..7e957918f33 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -53,6 +53,8 @@ static void scsi_eh_done(struct scsi_cmnd *scmd);  #define HOST_RESET_SETTLE_TIME  (10)  static int 
scsi_eh_try_stu(struct scsi_cmnd *scmd); +static int scsi_try_to_abort_cmd(struct scsi_host_template *, +				 struct scsi_cmnd *);  /* called with shost->host_lock held */  void scsi_eh_wakeup(struct Scsi_Host *shost) @@ -87,6 +89,140 @@ void scsi_schedule_eh(struct Scsi_Host *shost)  }  EXPORT_SYMBOL_GPL(scsi_schedule_eh); +static int scsi_host_eh_past_deadline(struct Scsi_Host *shost) +{ +	if (!shost->last_reset || shost->eh_deadline == -1) +		return 0; + +	/* +	 * 32bit accesses are guaranteed to be atomic +	 * (on all supported architectures), so instead +	 * of using a spinlock we can as well double check +	 * if eh_deadline has been set to 'off' during the +	 * time_before call. +	 */ +	if (time_before(jiffies, shost->last_reset + shost->eh_deadline) && +	    shost->eh_deadline > -1) +		return 0; + +	return 1; +} + +/** + * scmd_eh_abort_handler - Handle command aborts + * @work:	command to be aborted. + */ +void +scmd_eh_abort_handler(struct work_struct *work) +{ +	struct scsi_cmnd *scmd = +		container_of(work, struct scsi_cmnd, abort_work.work); +	struct scsi_device *sdev = scmd->device; +	int rtn; + +	if (scsi_host_eh_past_deadline(sdev->host)) { +		SCSI_LOG_ERROR_RECOVERY(3, +			scmd_printk(KERN_INFO, scmd, +				    "scmd %p eh timeout, not aborting\n", +				    scmd)); +	} else { +		SCSI_LOG_ERROR_RECOVERY(3, +			scmd_printk(KERN_INFO, scmd, +				    "aborting command %p\n", scmd)); +		rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); +		if (rtn == SUCCESS) { +			set_host_byte(scmd, DID_TIME_OUT); +			if (scsi_host_eh_past_deadline(sdev->host)) { +				SCSI_LOG_ERROR_RECOVERY(3, +					scmd_printk(KERN_INFO, scmd, +						    "scmd %p eh timeout, " +						    "not retrying aborted " +						    "command\n", scmd)); +			} else if (!scsi_noretry_cmd(scmd) && +			    (++scmd->retries <= scmd->allowed)) { +				SCSI_LOG_ERROR_RECOVERY(3, +					scmd_printk(KERN_WARNING, scmd, +						    "scmd %p retry " +						    "aborted command\n", scmd)); +				scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY); +				return; +			} else { +				SCSI_LOG_ERROR_RECOVERY(3, +					scmd_printk(KERN_WARNING, scmd, +						    "scmd %p finish " +						    "aborted command\n", scmd)); +				scsi_finish_command(scmd); +				return; +			} +		} else { +			SCSI_LOG_ERROR_RECOVERY(3, +				scmd_printk(KERN_INFO, scmd, +					    "scmd %p abort failed, rtn %d\n", +					    scmd, rtn)); +		} +	} + +	if (!scsi_eh_scmd_add(scmd, 0)) { +		SCSI_LOG_ERROR_RECOVERY(3, +			scmd_printk(KERN_WARNING, scmd, +				    "scmd %p terminate " +				    "aborted command\n", scmd)); +		set_host_byte(scmd, DID_TIME_OUT); +		scsi_finish_command(scmd); +	} +} + +/** + * scsi_abort_command - schedule a command abort + * @scmd:	scmd to abort. + * + * We only need to abort commands after a command timeout + */ +static int +scsi_abort_command(struct scsi_cmnd *scmd) +{ +	struct scsi_device *sdev = scmd->device; +	struct Scsi_Host *shost = sdev->host; +	unsigned long flags; + +	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { +		/* +		 * Retry after abort failed, escalate to next level. +		 */ +		scmd->eh_eflags &= ~SCSI_EH_ABORT_SCHEDULED; +		SCSI_LOG_ERROR_RECOVERY(3, +			scmd_printk(KERN_INFO, scmd, +				    "scmd %p previous abort failed\n", scmd)); +		cancel_delayed_work(&scmd->abort_work); +		return FAILED; +	} + +	/* +	 * Do not try a command abort if +	 * SCSI EH has already started. 
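+	 * The check runs under host_lock so it cannot race with the
+	 * error handler being scheduled on this host.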
+	 */ +	spin_lock_irqsave(shost->host_lock, flags); +	if (scsi_host_in_recovery(shost)) { +		spin_unlock_irqrestore(shost->host_lock, flags); +		SCSI_LOG_ERROR_RECOVERY(3, +			scmd_printk(KERN_INFO, scmd, +				    "scmd %p not aborting, host in recovery\n", +				    scmd)); +		return FAILED; +	} + +	if (shost->eh_deadline != -1 && !shost->last_reset) +		shost->last_reset = jiffies; +	spin_unlock_irqrestore(shost->host_lock, flags); + +	scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED; +	SCSI_LOG_ERROR_RECOVERY(3, +		scmd_printk(KERN_INFO, scmd, +			    "scmd %p abort scheduled\n", scmd)); +	queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100); +	return SUCCESS; +} +  /**   * scsi_eh_scmd_add - add scsi cmd to error handling.   * @scmd:	scmd to run eh on. @@ -109,7 +245,12 @@ int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)  		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))  			goto out_unlock; +	if (shost->eh_deadline != -1 && !shost->last_reset) +		shost->last_reset = jiffies; +  	ret = 1; +	if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) +		eh_flag &= ~SCSI_EH_CANCEL_CMD;  	scmd->eh_eflags |= eh_flag;  	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);  	shost->host_failed++; @@ -138,16 +279,23 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)  	trace_scsi_dispatch_cmd_timeout(scmd);  	scsi_log_completion(scmd, TIMEOUT_ERROR); +	if (host->eh_deadline != -1 && !host->last_reset) +		host->last_reset = jiffies; +  	if (host->transportt->eh_timed_out)  		rtn = host->transportt->eh_timed_out(scmd);  	else if (host->hostt->eh_timed_out)  		rtn = host->hostt->eh_timed_out(scmd); -	scmd->result |= DID_TIME_OUT << 16; +	if (rtn == BLK_EH_NOT_HANDLED) { +		if (!host->hostt->no_async_abort && +		    scsi_abort_command(scmd) == SUCCESS) +			return BLK_EH_NOT_HANDLED; -	if (unlikely(rtn == BLK_EH_NOT_HANDLED && -		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) -		rtn = BLK_EH_HANDLED; +		set_host_byte(scmd, DID_TIME_OUT); +		if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)) +			rtn = BLK_EH_HANDLED; +	}  	return rtn;  } @@ -773,10 +921,12 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,  	ses->prot_op = scmd->prot_op;  	scmd->prot_op = SCSI_PROT_NORMAL; +	scmd->eh_eflags = 0;  	scmd->cmnd = ses->eh_cmnd;  	memset(scmd->cmnd, 0, BLK_MAX_CDB);  	memset(&scmd->sdb, 0, sizeof(scmd->sdb));  	scmd->request->next_rq = NULL; +	scmd->result = 0;  	if (sense_bytes) {  		scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, @@ -879,6 +1029,7 @@ retry:  		rtn = NEEDS_RETRY;  	} else {  		timeleft = wait_for_completion_timeout(&done, timeout); +		rtn = SUCCESS;  	}  	shost->eh_action = NULL; @@ -923,12 +1074,6 @@ retry:  	scsi_eh_restore_cmnd(scmd, &ses); -	if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { -		struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); -		if (sdrv->eh_action) -			rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn); -	} -  	return rtn;  } @@ -946,6 +1091,16 @@ static int scsi_request_sense(struct scsi_cmnd *scmd)  	return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);  } +static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn) +{ +	if (scmd->request->cmd_type != REQ_TYPE_BLOCK_PC) { +		struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd); +		if (sdrv->eh_action) +			rtn = sdrv->eh_action(scmd, rtn); +	} +	return rtn; +} +  /**   * scsi_eh_finish_cmd - Handle a cmd that eh is finished with.   * @scmd:	Original SCSI cmd that eh has finished. 
@@ -990,6 +1145,7 @@ int scsi_eh_get_sense(struct list_head *work_q,  		      struct list_head *done_q)  {  	struct scsi_cmnd *scmd, *next; +	struct Scsi_Host *shost;  	int rtn;  	list_for_each_entry_safe(scmd, next, work_q, eh_entry) { @@ -997,6 +1153,23 @@ int scsi_eh_get_sense(struct list_head *work_q,  		    SCSI_SENSE_VALID(scmd))  			continue; +		shost = scmd->device->host; +		if (scsi_host_eh_past_deadline(shost)) { +			SCSI_LOG_ERROR_RECOVERY(3, +				shost_printk(KERN_INFO, shost, +					    "skip %s, past eh deadline\n", +					     __func__)); +			break; +		} +		if (status_byte(scmd->result) != CHECK_CONDITION) +			/* +			 * don't request sense if there's no check condition +			 * status because the error we're processing isn't one +			 * that has a sense code (and some devices get +			 * confused by sense requests out of the blue) +			 */ +			continue; +  		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,  						  "%s: requesting sense\n",  						  current->comm)); @@ -1087,6 +1260,18 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,  		scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);  		sdev = scmd->device; +		if (!try_stu) { +			if (scsi_host_eh_past_deadline(sdev->host)) { +				/* Push items back onto work_q */ +				list_splice_init(cmd_list, work_q); +				SCSI_LOG_ERROR_RECOVERY(3, +					shost_printk(KERN_INFO, sdev->host, +						     "skip %s, past eh deadline", +						     __func__)); +				break; +			} +		} +  		finish_cmds = !scsi_device_online(scmd->device) ||  			(try_stu && !scsi_eh_try_stu(scmd) &&  			 !scsi_eh_tur(scmd)) || @@ -1094,7 +1279,9 @@ static int scsi_eh_test_devices(struct list_head *cmd_list,  		list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)  			if (scmd->device == sdev) { -				if (finish_cmds) +				if (finish_cmds && +				    (try_stu || +				     scsi_eh_action(scmd, SUCCESS) == SUCCESS))  					scsi_eh_finish_cmd(scmd, done_q);  				else  					list_move_tail(&scmd->eh_entry, work_q); @@ -1122,26 +1309,38 @@ static int scsi_eh_abort_cmds(struct list_head *work_q,  	struct scsi_cmnd *scmd, *next;  	LIST_HEAD(check_list);  	int rtn; +	struct Scsi_Host *shost;  	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {  		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))  			continue; +		shost = scmd->device->host; +		if (scsi_host_eh_past_deadline(shost)) { +			list_splice_init(&check_list, work_q); +			SCSI_LOG_ERROR_RECOVERY(3, +				shost_printk(KERN_INFO, shost, +					    "skip %s, past eh deadline\n", +					     __func__)); +			return list_empty(work_q); +		}  		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"  						  "0x%p\n", current->comm,  						  scmd)); -		rtn = scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd); -		if (rtn == SUCCESS || rtn == FAST_IO_FAIL) { -			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; -			if (rtn == FAST_IO_FAIL) -				scsi_eh_finish_cmd(scmd, done_q); -			else -				list_move_tail(&scmd->eh_entry, &check_list); -		} else +		rtn = scsi_try_to_abort_cmd(shost->hostt, scmd); +		if (rtn == FAILED) {  			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"  							  " cmd failed:"  							  "0x%p\n",  							  current->comm,  							  scmd)); +			list_splice_init(&check_list, work_q); +			return list_empty(work_q); +		} +		scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD; +		if (rtn == FAST_IO_FAIL) +			scsi_eh_finish_cmd(scmd, done_q); +		else +			list_move_tail(&scmd->eh_entry, &check_list);  	}  	return scsi_eh_test_devices(&check_list, work_q, done_q, 0); @@ -1189,6 +1388,13 @@ static int 
scsi_eh_stu(struct Scsi_Host *shost,  	struct scsi_device *sdev;  	shost_for_each_device(sdev, shost) { +		if (scsi_host_eh_past_deadline(shost)) { +			SCSI_LOG_ERROR_RECOVERY(3, +				shost_printk(KERN_INFO, shost, +					    "skip %s, past eh deadline\n", +					     __func__)); +			break; +		}  		stu_scmd = NULL;  		list_for_each_entry(scmd, work_q, eh_entry)  			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) && @@ -1208,7 +1414,8 @@ static int scsi_eh_stu(struct Scsi_Host *shost,  			    !scsi_eh_tur(stu_scmd)) {  				list_for_each_entry_safe(scmd, next,  							  work_q, eh_entry) { -					if (scmd->device == sdev) +					if (scmd->device == sdev && +					    scsi_eh_action(scmd, SUCCESS) == SUCCESS)  						scsi_eh_finish_cmd(scmd, done_q);  				}  			} @@ -1244,6 +1451,13 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,  	int rtn;  	shost_for_each_device(sdev, shost) { +		if (scsi_host_eh_past_deadline(shost)) { +			SCSI_LOG_ERROR_RECOVERY(3, +				shost_printk(KERN_INFO, shost, +					    "skip %s, past eh deadline\n", +					     __func__)); +			break; +		}  		bdr_scmd = NULL;  		list_for_each_entry(scmd, work_q, eh_entry)  			if (scmd->device == sdev) { @@ -1264,7 +1478,8 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,  			    !scsi_eh_tur(bdr_scmd)) {  				list_for_each_entry_safe(scmd, next,  							 work_q, eh_entry) { -					if (scmd->device == sdev) +					if (scmd->device == sdev && +					    scsi_eh_action(scmd, rtn) != FAILED)  						scsi_eh_finish_cmd(scmd,  								   done_q);  				} @@ -1304,6 +1519,17 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,  		int rtn;  		unsigned int id; +		if (scsi_host_eh_past_deadline(shost)) { +			/* push back on work queue for further processing */ +			list_splice_init(&check_list, work_q); +			list_splice_init(&tmp_list, work_q); +			SCSI_LOG_ERROR_RECOVERY(3, +				shost_printk(KERN_INFO, shost, +					    "skip %s, past eh deadline\n", +					     __func__)); +			return list_empty(work_q); +		} +  		scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);  		id = scmd_id(scmd); @@ -1356,6 +1582,15 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,  	 */  	for (channel = 0; channel <= shost->max_channel; channel++) { +		if (scsi_host_eh_past_deadline(shost)) { +			list_splice_init(&check_list, work_q); +			SCSI_LOG_ERROR_RECOVERY(3, +				shost_printk(KERN_INFO, shost, +					    "skip %s, past eh deadline\n", +					     __func__)); +			return list_empty(work_q); +		} +  		chan_scmd = NULL;  		list_for_each_entry(scmd, work_q, eh_entry) {  			if (channel == scmd_channel(scmd)) { @@ -1455,7 +1690,7 @@ static void scsi_eh_offline_sdevs(struct list_head *work_q,  }  /** - * scsi_noretry_cmd - determinte if command should be failed fast + * scsi_noretry_cmd - determine if command should be failed fast   * @scmd:	SCSI cmd to examine.   */  int scsi_noretry_cmd(struct scsi_cmnd *scmd) @@ -1463,6 +1698,8 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)  	switch (host_byte(scmd->result)) {  	case DID_OK:  		break; +	case DID_TIME_OUT: +		goto check_type;  	case DID_BUS_BUSY:  		return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT);  	case DID_PARITY: @@ -1476,18 +1713,19 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd)  		return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER);  	} -	switch (status_byte(scmd->result)) { -	case CHECK_CONDITION: -		/* -		 * assume caller has checked sense and determinted -		 * the check condition was retryable. 
-		 */ -		if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || -		    scmd->request->cmd_type == REQ_TYPE_BLOCK_PC) -			return 1; -	} +	if (status_byte(scmd->result) != CHECK_CONDITION) +		return 0; -	return 0; +check_type: +	/* +	 * assume caller has checked sense and determined +	 * the check condition was retryable. +	 */ +	if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || +	    scmd->request->cmd_type == REQ_TYPE_BLOCK_PC) +		return 1; +	else +		return 0;  }  /** @@ -1537,9 +1775,13 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)  		 * looks good.  drop through, and check the next byte.  		 */  		break; +	case DID_ABORT: +		if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { +			set_host_byte(scmd, DID_TIME_OUT); +			return SUCCESS; +		}  	case DID_NO_CONNECT:  	case DID_BAD_TARGET: -	case DID_ABORT:  		/*  		 * note - this means that we just report the status back  		 * to the top level driver, not that we actually think @@ -1710,6 +1952,8 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)  	 */  	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL); +	blk_rq_set_block_pc(req); +  	req->cmd[0] = ALLOW_MEDIUM_REMOVAL;  	req->cmd[1] = 0;  	req->cmd[2] = 0; @@ -1719,7 +1963,6 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)  	req->cmd_len = COMMAND_SIZE(req->cmd[0]); -	req->cmd_type = REQ_TYPE_BLOCK_PC;  	req->cmd_flags |= REQ_QUIET;  	req->timeout = 10 * HZ;  	req->retries = 5; @@ -1755,8 +1998,9 @@ static void scsi_restart_operations(struct Scsi_Host *shost)  	 * will be requests for character device operations, and also for  	 * ioctls to queued block devices.  	 */ -	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n", -					  __func__)); +	SCSI_LOG_ERROR_RECOVERY(3, +		printk("scsi_eh_%d waking up host to restart\n", +		       shost->host_no));  	spin_lock_irqsave(shost->host_lock, flags);  	if (scsi_host_set_state(shost, SHOST_RUNNING)) @@ -1883,6 +2127,10 @@ static void scsi_unjam_host(struct Scsi_Host *shost)  		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))  			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q); +	spin_lock_irqsave(shost->host_lock, flags); +	if (shost->eh_deadline != -1) +		shost->last_reset = 0; +	spin_unlock_irqrestore(shost->host_lock, flags);  	scsi_eh_flush_done_q(&eh_done_q);  } @@ -1909,7 +2157,7 @@ int scsi_error_handler(void *data)  		if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||  		    shost->host_failed != shost->host_busy) {  			SCSI_LOG_ERROR_RECOVERY(1, -				printk("Error handler scsi_eh_%d sleeping\n", +				printk("scsi_eh_%d: sleeping\n",  					shost->host_no));  			schedule();  			continue; @@ -1917,8 +2165,9 @@ int scsi_error_handler(void *data)  		__set_current_state(TASK_RUNNING);  		SCSI_LOG_ERROR_RECOVERY(1, -			printk("Error handler scsi_eh_%d waking up\n", -				shost->host_no)); +			printk("scsi_eh_%d: waking up %d/%d/%d\n", +			       shost->host_no, shost->host_eh_scheduled, +			       shost->host_failed, shost->host_busy));  		/*  		 * We have a host that is failing for some reason.  
Figure out @@ -2053,7 +2302,18 @@ scsi_reset_provider(struct scsi_device *dev, int flag)  	if (scsi_autopm_get_host(shost) < 0)  		return FAILED; +	if (!get_device(&dev->sdev_gendev)) { +		rtn = FAILED; +		goto out_put_autopm_host; +	} +  	scmd = scsi_get_command(dev, GFP_KERNEL); +	if (!scmd) { +		rtn = FAILED; +		put_device(&dev->sdev_gendev); +		goto out_put_autopm_host; +	} +  	blk_rq_init(NULL, &req);  	scmd->request = &req; @@ -2110,6 +2370,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)  	scsi_run_host_queues(shost);  	scsi_next_command(scmd); +out_put_autopm_host:  	scsi_autopm_put_host(shost);  	return rtn;  } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index d1549b74e2d..3f50dfcb322 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -75,28 +75,6 @@ struct kmem_cache *scsi_sdb_cache;   */  #define SCSI_QUEUE_DELAY	3 -/* - * Function:	scsi_unprep_request() - * - * Purpose:	Remove all preparation done for a request, including its - *		associated scsi_cmnd, so that it can be requeued. - * - * Arguments:	req	- request to unprepare - * - * Lock status:	Assumed that no locks are held upon entry. - * - * Returns:	Nothing. - */ -static void scsi_unprep_request(struct request *req) -{ -	struct scsi_cmnd *cmd = req->special; - -	blk_unprep_request(req); -	req->special = NULL; - -	scsi_put_command(cmd); -} -  /**   * __scsi_queue_insert - private queue insertion   * @cmd: The SCSI command being requeued @@ -159,9 +137,10 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)  	 * lock such that the kblockd_schedule_work() call happens  	 * before blk_cleanup_queue() finishes.  	 */ +	cmd->result = 0;  	spin_lock_irqsave(q->queue_lock, flags);  	blk_requeue_request(q, cmd->request); -	kblockd_schedule_work(q, &device->requeue_work); +	kblockd_schedule_work(&device->requeue_work);  	spin_unlock_irqrestore(q->queue_lock, flags);  } @@ -206,7 +185,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)   */  int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,  		 int data_direction, void *buffer, unsigned bufflen, -		 unsigned char *sense, int timeout, int retries, int flags, +		 unsigned char *sense, int timeout, int retries, u64 flags,  		 int *resid)  {  	struct request *req; @@ -216,6 +195,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,  	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);  	if (!req)  		return ret; +	blk_rq_set_block_pc(req);  	if (bufflen &&	blk_rq_map_kern(sdev->request_queue, req,  					buffer, bufflen, __GFP_WAIT)) @@ -227,7 +207,6 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,  	req->sense_len = 0;  	req->retries = retries;  	req->timeout = timeout; -	req->cmd_type = REQ_TYPE_BLOCK_PC;  	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;  	/* @@ -257,7 +236,7 @@ EXPORT_SYMBOL(scsi_execute);  int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,  		     int data_direction, void *buffer, unsigned bufflen,  		     struct scsi_sense_hdr *sshdr, int timeout, int retries, -		     int *resid, int flags) +		     int *resid, u64 flags)  {  	char *sense = NULL;  	int result; @@ -385,29 +364,12 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)  	return 0;  } -/* - * Function:	scsi_run_queue() - * - * Purpose:	Select a proper request queue to serve next - * - * Arguments:	q	- last request's queue - * - * Returns:     Nothing - * - * Notes:	The previous command was completely 
finished, start - *		a new one if possible. - */ -static void scsi_run_queue(struct request_queue *q) +static void scsi_starved_list_run(struct Scsi_Host *shost)  { -	struct scsi_device *sdev = q->queuedata; -	struct Scsi_Host *shost;  	LIST_HEAD(starved_list); +	struct scsi_device *sdev;  	unsigned long flags; -	shost = sdev->host; -	if (scsi_target(sdev)->single_lun) -		scsi_single_lun_run(sdev); -  	spin_lock_irqsave(shost->host_lock, flags);  	list_splice_init(&shost->starved_list, &starved_list); @@ -459,6 +421,28 @@ static void scsi_run_queue(struct request_queue *q)  	/* put any unprocessed entries back */  	list_splice(&starved_list, &shost->starved_list);  	spin_unlock_irqrestore(shost->host_lock, flags); +} + +/* + * Function:   scsi_run_queue() + * + * Purpose:    Select a proper request queue to serve next + * + * Arguments:  q       - last request's queue + * + * Returns:     Nothing + * + * Notes:      The previous command was completely finished, start + *             a new one if possible. + */ +static void scsi_run_queue(struct request_queue *q) +{ +	struct scsi_device *sdev = q->queuedata; + +	if (scsi_target(sdev)->single_lun) +		scsi_single_lun_run(sdev); +	if (!list_empty(&sdev->host->starved_list)) +		scsi_starved_list_run(sdev->host);  	blk_run_queue(q);  } @@ -497,16 +481,10 @@ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)  	struct request *req = cmd->request;  	unsigned long flags; -	/* -	 * We need to hold a reference on the device to avoid the queue being -	 * killed after the unlock and before scsi_run_queue is invoked which -	 * may happen because scsi_unprep_request() puts the command which -	 * releases its reference on the device. -	 */ -	get_device(&sdev->sdev_gendev); -  	spin_lock_irqsave(q->queue_lock, flags); -	scsi_unprep_request(req); +	blk_unprep_request(req); +	req->special = NULL; +	scsi_put_command(cmd);  	blk_requeue_request(q, req);  	spin_unlock_irqrestore(q->queue_lock, flags); @@ -520,13 +498,9 @@ void scsi_next_command(struct scsi_cmnd *cmd)  	struct scsi_device *sdev = cmd->device;  	struct request_queue *q = sdev->request_queue; -	/* need to hold a reference on the device before we let go of the cmd */ -	get_device(&sdev->sdev_gendev); -  	scsi_put_command(cmd);  	scsi_run_queue(q); -	/* ok to remove device now */  	put_device(&sdev->sdev_gendev);  } @@ -538,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)  		scsi_run_queue(sdev->request_queue);  } -static void __scsi_release_buffers(struct scsi_cmnd *, int); - -/* - * Function:    scsi_end_request() - * - * Purpose:     Post-processing of completed commands (usually invoked at end - *		of upper level post-processing and scsi_io_completion). - * - * Arguments:   cmd	 - command that is complete. - *              error    - 0 if I/O indicates success, < 0 for I/O error. - *              bytes    - number of bytes of completed I/O - *		requeue  - indicates whether we should requeue leftovers. - * - * Lock status: Assumed that lock is not held upon entry. - * - * Returns:     cmd if requeue required, NULL otherwise. - * - * Notes:       This is called for block device requests in order to - *              mark some number of sectors as complete. - *  - *		We are guaranteeing that the request queue will be goosed - *		at some point during this call. - * Notes:	If cmd was requeued, upon return it will be a stale pointer. 
- */ -static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error, -					  int bytes, int requeue) -{ -	struct request_queue *q = cmd->device->request_queue; -	struct request *req = cmd->request; - -	/* -	 * If there are blocks left over at the end, set up the command -	 * to queue the remainder of them. -	 */ -	if (blk_end_request(req, error, bytes)) { -		/* kill remainder if no retrys */ -		if (error && scsi_noretry_cmd(cmd)) -			blk_end_request_all(req, error); -		else { -			if (requeue) { -				/* -				 * Bleah.  Leftovers again.  Stick the -				 * leftovers in the front of the -				 * queue, and goose the queue again. -				 */ -				scsi_release_buffers(cmd); -				scsi_requeue_command(q, cmd); -				cmd = NULL; -			} -			return cmd; -		} -	} - -	/* -	 * This will goose the queue request function at the end, so we don't -	 * need to worry about launching another command. -	 */ -	__scsi_release_buffers(cmd, 0); -	scsi_next_command(cmd); -	return NULL; -} -  static inline unsigned int scsi_sgtable_index(unsigned short nents)  {  	unsigned int index; @@ -651,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)  	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);  } -static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check) -{ - -	if (cmd->sdb.table.nents) -		scsi_free_sgtable(&cmd->sdb); - -	memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - -	if (do_bidi_check && scsi_bidi_cmnd(cmd)) { -		struct scsi_data_buffer *bidi_sdb = -			cmd->request->next_rq->special; -		scsi_free_sgtable(bidi_sdb); -		kmem_cache_free(scsi_sdb_cache, bidi_sdb); -		cmd->request->next_rq->special = NULL; -	} - -	if (scsi_prot_sg_count(cmd)) -		scsi_free_sgtable(cmd->prot_sdb); -} -  /*   * Function:    scsi_release_buffers()   * - * Purpose:     Completion processing for block device I/O requests. + * Purpose:     Free resources allocated for a scsi_command.   *   * Arguments:   cmd	- command that we are bailing.   * @@ -685,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)   * Notes:       In the event that an upper level driver rejects a   *		command, we must release resources allocated during   *		the __init_io() function.  Primarily this would involve - *		the scatter-gather table, and potentially any bounce - *		buffers. + *		the scatter-gather table.   */  void scsi_release_buffers(struct scsi_cmnd *cmd)  { -	__scsi_release_buffers(cmd, 1); +	if (cmd->sdb.table.nents) +		scsi_free_sgtable(&cmd->sdb); + +	memset(&cmd->sdb, 0, sizeof(cmd->sdb)); + +	if (scsi_prot_sg_count(cmd)) +		scsi_free_sgtable(cmd->prot_sdb);  }  EXPORT_SYMBOL(scsi_release_buffers); +static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) +{ +	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special; + +	scsi_free_sgtable(bidi_sdb); +	kmem_cache_free(scsi_sdb_cache, bidi_sdb); +	cmd->request->next_rq->special = NULL; +} +  /**   * __scsi_error_from_host_byte - translate SCSI error code into errno   * @cmd:	SCSI command (unused) @@ -751,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)   *   * Returns:     Nothing   * - * Notes:       This function is matched in terms of capabilities to - *              the function that created the scatter-gather list. - *              In other words, if there are no bounce buffers - *              (the normal case for most drivers), we don't need - *              the logic to deal with cleaning up afterwards. - * -		We must call scsi_end_request().  
This will finish off - *		the specified number of sectors.  If we are done, the - *		command block will be released and the queue function - *		will be goosed.  If we are not done then we have to + * Notes:       We will finish off the specified number of sectors.  If we + *		are done, the command block will be released and the queue + *		function will be goosed.  If we are not done then we have to   *		figure out what to do next:   *   *		a) We can call scsi_requeue_command().  The request @@ -769,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)   *		   be used if we made forward progress, or if we want   *		   to switch from READ(10) to READ(6) for example.   * - *		b) We can call scsi_queue_insert().  The request will + *		b) We can call __scsi_queue_insert().  The request will   *		   be put back on the queue and retried using the same   *		   command as before, possibly after a delay.   * @@ -788,6 +687,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,  	      ACTION_DELAYED_RETRY} action;  	char *description = NULL; +	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;  	if (result) {  		sense_valid = scsi_command_normalize_sense(cmd, &sshdr); @@ -826,11 +726,21 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  			req->next_rq->resid_len = scsi_in(cmd)->resid;  			scsi_release_buffers(cmd); +			scsi_release_bidi_buffers(cmd); +  			blk_end_request_all(req, 0);  			scsi_next_command(cmd);  			return;  		} +	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { +		/* +		 * Certain non-BLOCK_PC requests are commands that don't +		 * actually transfer anything (FLUSH), so cannot use +		 * good_bytes != blk_rq_bytes(req) as the signal for an error. +		 * This sets the error explicitly for the problem case. +		 */ +		error = __scsi_error_from_host_byte(cmd, result);  	}  	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */ @@ -865,12 +775,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  	}  	/* -	 * A number of bytes were successfully read.  If there -	 * are leftovers and there is some kind of error -	 * (result != 0), retry the rest. +	 * If we finished all bytes in the request we are done now.  	 */ -	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) -		return; +	if (!blk_end_request(req, error, good_bytes)) +		goto next_command; + +	/* +	 * Kill remainder if no retries. +	 */ +	if (error && scsi_noretry_cmd(cmd)) { +		blk_end_request_all(req, error); +		goto next_command; +	} + +	/* +	 * If there had been no error, but we have leftover bytes in the +	 * request, just queue the command up again. 
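+	 * blk_end_request() above has already completed good_bytes of the
+	 * request, so the re-prepared command picks up at the first
+	 * unfinished sector.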
+	 */ +	if (result == 0) +		goto requeue;  	error = __scsi_error_from_host_byte(cmd, result); @@ -989,10 +912,15 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  		action = ACTION_FAIL;  	} +	if (action != ACTION_FAIL && +	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { +		action = ACTION_FAIL; +		description = "Command timed out"; +	} +  	switch (action) {  	case ACTION_FAIL:  		/* Give up and fail the remainder of the request */ -		scsi_release_buffers(cmd);  		if (!(req->cmd_flags & REQ_QUIET)) {  			if (description)  				scmd_printk(KERN_INFO, cmd, "%s\n", @@ -1002,12 +930,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  				scsi_print_sense("", cmd);  			scsi_print_command(cmd);  		} -		if (blk_end_request_err(req, error)) -			scsi_requeue_command(q, cmd); -		else -			scsi_next_command(cmd); -		break; +		if (!blk_end_request_err(req, error)) +			goto next_command; +		/*FALLTHRU*/  	case ACTION_REPREP: +	requeue:  		/* Unprep the request and put it back at the head of the queue.  		 * A new command will be prepared and issued.  		 */ @@ -1023,6 +950,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)  		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);  		break;  	} +	return; + +next_command: +	scsi_release_buffers(cmd); +	scsi_next_command(cmd);  }  static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, @@ -1038,8 +970,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,  		return BLKPREP_DEFER;  	} -	req->buffer = NULL; -  	/*   	 * Next, walk the list, and fill in the addresses and sizes of  	 * each segment. @@ -1064,6 +994,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,   */  int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)  { +	struct scsi_device *sdev = cmd->device;  	struct request *rq = cmd->request;  	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); @@ -1111,6 +1042,7 @@ err_exit:  	scsi_release_buffers(cmd);  	cmd->request->special = NULL;  	scsi_put_command(cmd); +	put_device(&sdev->sdev_gendev);  	return error;  }  EXPORT_SYMBOL(scsi_init_io); @@ -1121,9 +1053,15 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,  	struct scsi_cmnd *cmd;  	if (!req->special) { +		/* Bail if we can't get a reference to the device */ +		if (!get_device(&sdev->sdev_gendev)) +			return NULL; +  		cmd = scsi_get_command(sdev, GFP_ATOMIC); -		if (unlikely(!cmd)) +		if (unlikely(!cmd)) { +			put_device(&sdev->sdev_gendev);  			return NULL; +		}  		req->special = cmd;  	} else {  		cmd = req->special; @@ -1141,15 +1079,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,  int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)  { -	struct scsi_cmnd *cmd; -	int ret = scsi_prep_state_check(sdev, req); - -	if (ret != BLKPREP_OK) -		return ret; - -	cmd = scsi_get_cmd_from_req(sdev, req); -	if (unlikely(!cmd)) -		return BLKPREP_DEFER; +	struct scsi_cmnd *cmd = req->special;  	/*  	 * BLOCK_PC requests may transfer data, in which case they must @@ -1169,7 +1099,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)  		BUG_ON(blk_rq_bytes(req));  		memset(&cmd->sdb, 0, sizeof(cmd->sdb)); -		req->buffer = NULL;  	}  	cmd->cmd_len = req->cmd_len; @@ -1193,15 +1122,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);   */  int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)  { -	struct scsi_cmnd *cmd; 
-	int ret = scsi_prep_state_check(sdev, req); - -	if (ret != BLKPREP_OK) -		return ret; +	struct scsi_cmnd *cmd = req->special;  	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh  			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) { -		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req); +		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);  		if (ret != BLKPREP_OK)  			return ret;  	} @@ -1211,16 +1136,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)  	 */  	BUG_ON(!req->nr_phys_segments); -	cmd = scsi_get_cmd_from_req(sdev, req); -	if (unlikely(!cmd)) -		return BLKPREP_DEFER; -  	memset(cmd->cmnd, 0, BLK_MAX_CDB);  	return scsi_init_io(cmd, GFP_ATOMIC);  }  EXPORT_SYMBOL(scsi_setup_fs_cmnd); -int scsi_prep_state_check(struct scsi_device *sdev, struct request *req) +static int +scsi_prep_state_check(struct scsi_device *sdev, struct request *req)  {  	int ret = BLKPREP_OK; @@ -1272,9 +1194,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)  	}  	return ret;  } -EXPORT_SYMBOL(scsi_prep_state_check); -int scsi_prep_return(struct request_queue *q, struct request *req, int ret) +static int +scsi_prep_return(struct request_queue *q, struct request *req, int ret)  {  	struct scsi_device *sdev = q->queuedata; @@ -1286,6 +1208,7 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)  			struct scsi_cmnd *cmd = req->special;  			scsi_release_buffers(cmd);  			scsi_put_command(cmd); +			put_device(&sdev->sdev_gendev);  			req->special = NULL;  		}  		break; @@ -1304,18 +1227,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)  	return ret;  } -EXPORT_SYMBOL(scsi_prep_return); -int scsi_prep_fn(struct request_queue *q, struct request *req) +static int scsi_prep_fn(struct request_queue *q, struct request *req)  {  	struct scsi_device *sdev = q->queuedata; -	int ret = BLKPREP_KILL; +	struct scsi_cmnd *cmd; +	int ret; + +	ret = scsi_prep_state_check(sdev, req); +	if (ret != BLKPREP_OK) +		goto out; -	if (req->cmd_type == REQ_TYPE_BLOCK_PC) +	cmd = scsi_get_cmd_from_req(sdev, req); +	if (unlikely(!cmd)) { +		ret = BLKPREP_DEFER; +		goto out; +	} + +	if (req->cmd_type == REQ_TYPE_FS) +		ret = scsi_cmd_to_driver(cmd)->init_command(cmd); +	else if (req->cmd_type == REQ_TYPE_BLOCK_PC)  		ret = scsi_setup_blk_pc_cmnd(sdev, req); +	else +		ret = BLKPREP_KILL; + +out:  	return scsi_prep_return(q, req, ret);  } -EXPORT_SYMBOL(scsi_prep_fn); + +static void scsi_unprep_fn(struct request_queue *q, struct request *req) +{ +	if (req->cmd_type == REQ_TYPE_FS) { +		struct scsi_cmnd *cmd = req->special; +		struct scsi_driver *drv = scsi_cmd_to_driver(cmd); + +		if (drv->uninit_command) +			drv->uninit_command(cmd); +	} +}  /*   * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else @@ -1543,16 +1492,14 @@ static void scsi_softirq_done(struct request *rq)   * Lock status: IO request lock assumed to be held when called.   */  static void scsi_request_fn(struct request_queue *q) +	__releases(q->queue_lock) +	__acquires(q->queue_lock)  {  	struct scsi_device *sdev = q->queuedata;  	struct Scsi_Host *shost;  	struct scsi_cmnd *cmd;  	struct request *req; -	if(!get_device(&sdev->sdev_gendev)) -		/* We must be tearing the block queue down already */ -		return; -  	/*  	 * To start with, we keep looping until the queue is empty, or until  	 * the host is no longer able to accept any more requests. 
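With the prep hunks above, command allocation and state checking happen once in scsi_prep_fn(), REQ_TYPE_FS requests are dispatched to the upper-level driver's init_command hook, and scsi_unprep_fn() invokes the matching uninit_command. scsi_setup_fs_cmnd() stays exported, which suggests a ULD is expected to call it itself for the device-handler prep and the scsi_init_io() mapping. A minimal sketch of such a hookup, under those assumptions; the mydrv_* names are hypothetical and not part of this patch:

/* assumes <scsi/scsi.h>, <scsi/scsi_cmnd.h>, <scsi/scsi_driver.h>,
 * <linux/blkdev.h> and <asm/unaligned.h>
 */
static int mydrv_init_command(struct scsi_cmnd *cmd)
{
	struct request *rq = cmd->request;
	int ret;

	/* device-handler prep + map the request's bios into cmd->sdb */
	ret = scsi_setup_fs_cmnd(cmd->device, rq);
	if (ret != BLKPREP_OK)
		return ret;

	/* build a CDB; illustrative only: 512-byte blocks, LBA below 2^32 */
	cmd->cmd_len = 10;
	cmd->cmnd[0] = rq_data_dir(rq) == WRITE ? WRITE_10 : READ_10;
	put_unaligned_be32((u32)blk_rq_pos(rq), &cmd->cmnd[2]);
	put_unaligned_be16((u16)blk_rq_sectors(rq), &cmd->cmnd[7]);
	cmd->sc_data_direction = rq_data_dir(rq) == WRITE ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	cmd->transfersize = 512;
	cmd->allowed = 3;
	return BLKPREP_OK;
}

static void mydrv_uninit_command(struct scsi_cmnd *cmd)
{
	/* undo anything mydrv_init_command() set up for this command */
}

static struct scsi_driver mydrv_template = {
	.gendrv = {
		.name	= "mydrv",
		.owner	= THIS_MODULE,
	},
	.init_command	= mydrv_init_command,
	.uninit_command	= mydrv_uninit_command,
};

Note that uninit_command also runs when a request is unprepared for requeue (the ACTION_REPREP path above), not only at final completion, so the same request may be prepared again afterwards.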
@@ -1641,7 +1588,7 @@ static void scsi_request_fn(struct request_queue *q)  			goto out_delay;  	} -	goto out; +	return;   not_ready:  	spin_unlock_irq(shost->host_lock); @@ -1660,12 +1607,6 @@ static void scsi_request_fn(struct request_queue *q)  out_delay:  	if (sdev->device_busy == 0)  		blk_delay_queue(q, SCSI_QUEUE_DELAY); -out: -	/* must be careful here...if we trigger the ->remove() function -	 * we cannot be holding the q lock */ -	spin_unlock_irq(q->queue_lock); -	put_device(&sdev->sdev_gendev); -	spin_lock_irq(q->queue_lock);  }  u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) @@ -1684,7 +1625,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)  	host_dev = scsi_get_device(shost);  	if (host_dev && host_dev->dma_mask) -		bounce_limit = *host_dev->dma_mask; +		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;  	return bounce_limit;  } @@ -1744,6 +1685,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)  		return NULL;  	blk_queue_prep_rq(q, scsi_prep_fn); +	blk_queue_unprep_rq(q, scsi_unprep_fn);  	blk_queue_softirq_done(q, scsi_softirq_done);  	blk_queue_rq_timed_out(q, scsi_times_out);  	blk_queue_lld_busy(q, scsi_lld_busy); diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index fe30ea94ffe..109802f776e 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)  			goto next_msg;  		} -		if (!capable(CAP_SYS_ADMIN)) { +		if (!netlink_capable(skb, CAP_SYS_ADMIN)) {  			err = -EPERM;  			goto next_msg;  		} diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 4c5aabe2175..7454498c409 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -16,37 +16,79 @@  #include "scsi_priv.h" -static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *)) +#ifdef CONFIG_PM_SLEEP + +static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm) +{ +	return pm && pm->suspend ? pm->suspend(dev) : 0; +} + +static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm) +{ +	return pm && pm->freeze ? pm->freeze(dev) : 0; +} + +static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm) +{ +	return pm && pm->poweroff ? pm->poweroff(dev) : 0; +} + +static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)  { +	return pm && pm->resume ? pm->resume(dev) : 0; +} + +static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm) +{ +	return pm && pm->thaw ? pm->thaw(dev) : 0; +} + +static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm) +{ +	return pm && pm->restore ? pm->restore(dev) : 0; +} + +static int scsi_dev_type_suspend(struct device *dev, +		int (*cb)(struct device *, const struct dev_pm_ops *)) +{ +	const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL;  	int err; +	/* flush pending in-flight resume operations, suspend is synchronous */ +	async_synchronize_full_domain(&scsi_sd_pm_domain); +  	err = scsi_device_quiesce(to_scsi_device(dev));  	if (err == 0) { -		if (cb) { -			err = cb(dev); -			if (err) -				scsi_device_resume(to_scsi_device(dev)); -		} +		err = cb(dev, pm); +		if (err) +			scsi_device_resume(to_scsi_device(dev));  	}  	dev_dbg(dev, "scsi suspend: %d\n", err);  	return err;  } -static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *)) +static int scsi_dev_type_resume(struct device *dev, +		int (*cb)(struct device *, const struct dev_pm_ops *))  { +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;  	int err = 0; -	if (cb) -		err = cb(dev); +	err = cb(dev, pm);  	scsi_device_resume(to_scsi_device(dev));  	dev_dbg(dev, "scsi resume: %d\n", err); + +	if (err == 0) { +		pm_runtime_disable(dev); +		pm_runtime_set_active(dev); +		pm_runtime_enable(dev); +	} +  	return err;  } -#ifdef CONFIG_PM_SLEEP -  static int -scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *)) +scsi_bus_suspend_common(struct device *dev, +		int (*cb)(struct device *, const struct dev_pm_ops *))  {  	int err = 0; @@ -54,7 +96,8 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))  		/*  		 * All the high-level SCSI drivers that implement runtime  		 * PM treat runtime suspend, system suspend, and system -		 * hibernate identically. +		 * hibernate nearly identically. In all cases the requirements +		 * for runtime suspension are stricter.  		 */  		if (pm_runtime_suspended(dev))  			return 0; @@ -65,20 +108,54 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))  	return err;  } -static int -scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *)) +static void async_sdev_resume(void *dev, async_cookie_t cookie)  { -	int err = 0; +	scsi_dev_type_resume(dev, do_scsi_resume); +} -	if (scsi_is_sdev_device(dev)) -		err = scsi_dev_type_resume(dev, cb); +static void async_sdev_thaw(void *dev, async_cookie_t cookie) +{ +	scsi_dev_type_resume(dev, do_scsi_thaw); +} -	if (err == 0) { +static void async_sdev_restore(void *dev, async_cookie_t cookie) +{ +	scsi_dev_type_resume(dev, do_scsi_restore); +} + +static int scsi_bus_resume_common(struct device *dev, +		int (*cb)(struct device *, const struct dev_pm_ops *)) +{ +	async_func_t fn; + +	if (!scsi_is_sdev_device(dev)) +		fn = NULL; +	else if (cb == do_scsi_resume) +		fn = async_sdev_resume; +	else if (cb == do_scsi_thaw) +		fn = async_sdev_thaw; +	else if (cb == do_scsi_restore) +		fn = async_sdev_restore; +	else +		fn = NULL; + +	if (fn) { +		async_schedule_domain(fn, dev, &scsi_sd_pm_domain); + +		/* +		 * If a user has disabled async probing a likely reason +		 * is due to a storage enclosure that does not inject +		 * staggered spin-ups.  For safety, make resume +		 * synchronous as well in that case. +		 */ +		if (strncmp(scsi_scan_type, "async", 5) != 0) +			async_synchronize_full_domain(&scsi_sd_pm_domain); +	} else {  		pm_runtime_disable(dev);  		pm_runtime_set_active(dev);  		pm_runtime_enable(dev);  	} -	return err; +	return 0;  }  static int scsi_bus_prepare(struct device *dev) @@ -96,38 +173,32 @@ static int scsi_bus_prepare(struct device *dev)  static int scsi_bus_suspend(struct device *dev)  { -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	return scsi_bus_suspend_common(dev, pm ? 
pm->suspend : NULL); +	return scsi_bus_suspend_common(dev, do_scsi_suspend);  }  static int scsi_bus_resume(struct device *dev)  { -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	return scsi_bus_resume_common(dev, pm ? pm->resume : NULL); +	return scsi_bus_resume_common(dev, do_scsi_resume);  }  static int scsi_bus_freeze(struct device *dev)  { -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL); +	return scsi_bus_suspend_common(dev, do_scsi_freeze);  }  static int scsi_bus_thaw(struct device *dev)  { -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL); +	return scsi_bus_resume_common(dev, do_scsi_thaw);  }  static int scsi_bus_poweroff(struct device *dev)  { -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL); +	return scsi_bus_suspend_common(dev, do_scsi_poweroff);  }  static int scsi_bus_restore(struct device *dev)  { -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	return scsi_bus_resume_common(dev, pm ? pm->restore : NULL); +	return scsi_bus_resume_common(dev, do_scsi_restore);  }  #else /* CONFIG_PM_SLEEP */ @@ -144,38 +215,22 @@ static int scsi_bus_restore(struct device *dev)  #ifdef CONFIG_PM_RUNTIME -static int sdev_blk_runtime_suspend(struct scsi_device *sdev, -					int (*cb)(struct device *)) +static int sdev_runtime_suspend(struct device *dev)  { +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; +	struct scsi_device *sdev = to_scsi_device(dev);  	int err;  	err = blk_pre_runtime_suspend(sdev->request_queue);  	if (err)  		return err; -	if (cb) -		err = cb(&sdev->sdev_gendev); +	if (pm && pm->runtime_suspend) +		err = pm->runtime_suspend(dev);  	blk_post_runtime_suspend(sdev->request_queue, err);  	return err;  } -static int sdev_runtime_suspend(struct device *dev) -{ -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL; -	struct scsi_device *sdev = to_scsi_device(dev); -	int err; - -	if (sdev->request_queue->dev) -		return sdev_blk_runtime_suspend(sdev, cb); - -	err = scsi_dev_type_suspend(dev, cb); -	if (err == -EAGAIN) -		pm_schedule_suspend(dev, jiffies_to_msecs( -					round_jiffies_up_relative(HZ/10))); -	return err; -} -  static int scsi_runtime_suspend(struct device *dev)  {  	int err = 0; @@ -189,31 +244,20 @@ static int scsi_runtime_suspend(struct device *dev)  	return err;  } -static int sdev_blk_runtime_resume(struct scsi_device *sdev, -					int (*cb)(struct device *)) +static int sdev_runtime_resume(struct device *dev)  { +	struct scsi_device *sdev = to_scsi_device(dev); +	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;  	int err = 0;  	blk_pre_runtime_resume(sdev->request_queue); -	if (cb) -		err = cb(&sdev->sdev_gendev); +	if (pm && pm->runtime_resume) +		err = pm->runtime_resume(dev);  	blk_post_runtime_resume(sdev->request_queue, err);  	return err;  } -static int sdev_runtime_resume(struct device *dev) -{ -	struct scsi_device *sdev = to_scsi_device(dev); -	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; -	int (*cb)(struct device *) = pm ? 
pm->runtime_resume : NULL; - -	if (sdev->request_queue->dev) -		return sdev_blk_runtime_resume(sdev, cb); -	else -		return scsi_dev_type_resume(dev, cb); -} -  static int scsi_runtime_resume(struct device *dev)  {  	int err = 0; @@ -234,14 +278,11 @@ static int scsi_runtime_idle(struct device *dev)  	/* Insert hooks here for targets, hosts, and transport classes */  	if (scsi_is_sdev_device(dev)) { -		struct scsi_device *sdev = to_scsi_device(dev); - -		if (sdev->request_queue->dev) { -			pm_runtime_mark_last_busy(dev); -			pm_runtime_autosuspend(dev); -			return -EBUSY; -		} +		pm_runtime_mark_last_busy(dev); +		pm_runtime_autosuspend(dev); +		return -EBUSY;  	} +  	return 0;  } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index 8f9a0cadc29..48e5b657e79 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -19,6 +19,7 @@ struct scsi_nl_hdr;   * Scsi Error Handler Flags   */  #define SCSI_EH_CANCEL_CMD	0x0001	/* Cancel this cmd */ +#define SCSI_EH_ABORT_SCHEDULED	0x0002	/* Abort has been scheduled */  #define SCSI_SENSE_VALID(scmd) \  	(((scmd)->sense_buffer[0] & 0x70) == 0x70) @@ -66,6 +67,7 @@ extern int __init scsi_init_devinfo(void);  extern void scsi_exit_devinfo(void);  /* scsi_error.c */ +extern void scmd_eh_abort_handler(struct work_struct *work);  extern enum blk_eh_timer_return scsi_times_out(struct request *req);  extern int scsi_error_handler(void *host);  extern int scsi_decide_disposition(struct scsi_cmnd *cmd); @@ -110,6 +112,7 @@ extern void scsi_exit_procfs(void);  #endif /* CONFIG_PROC_FS */  /* scsi_scan.c */ +extern char scsi_scan_type[];  extern int scsi_complete_async_scans(void);  extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,  				   unsigned int, unsigned int, int); @@ -164,6 +167,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }  static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}  #endif /* CONFIG_PM_RUNTIME */ +extern struct async_domain scsi_sd_pm_domain;  extern struct async_domain scsi_sd_probe_domain;  /*  diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 307a8113760..e02b3aab56c 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,  #define SCSI_SCAN_TYPE_DEFAULT "sync"  #endif -static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT; +char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;  module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);  MODULE_PARM_DESC(scan, "sync, async or none"); @@ -320,6 +320,7 @@ static void scsi_target_destroy(struct scsi_target *starget)  	struct Scsi_Host *shost = dev_to_shost(dev->parent);  	unsigned long flags; +	starget->state = STARGET_DEL;  	transport_destroy_device(dev);  	spin_lock_irqsave(shost->host_lock, flags);  	if (shost->hostt->target_destroy) @@ -371,6 +372,37 @@ static struct scsi_target *__scsi_find_target(struct device *parent,  }  /** + * scsi_target_reap_ref_release - remove target from visibility + * @kref: the reap_ref in the target being released + * + * Called on last put of reap_ref, which is the indication that no device + * under this target is visible anymore, so render the target invisible in + * sysfs.  Note: we have to be in user context here because the target reaps + * should be done in places where the scsi device visibility is being removed. 
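+ * The kref guarantees the release fires exactly once, on the final put of
+ * reap_ref, so the device_del()/scsi_target_destroy() sequence below cannot
+ * run twice for the same target.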
+ */ +static void scsi_target_reap_ref_release(struct kref *kref) +{ +	struct scsi_target *starget +		= container_of(kref, struct scsi_target, reap_ref); + +	/* +	 * if we get here and the target is still in the CREATED state that +	 * means it was allocated but never made visible (because a scan +	 * turned up no LUNs), so don't call device_del() on it. +	 */ +	if (starget->state != STARGET_CREATED) { +		transport_remove_device(&starget->dev); +		device_del(&starget->dev); +	} +	scsi_target_destroy(starget); +} + +static void scsi_target_reap_ref_put(struct scsi_target *starget) +{ +	kref_put(&starget->reap_ref, scsi_target_reap_ref_release); +} + +/**   * scsi_alloc_target - allocate a new or find an existing target   * @parent:	parent of the target (need not be a scsi host)   * @channel:	target channel number (zero if no channels) @@ -392,7 +424,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,  		+ shost->transportt->target_size;  	struct scsi_target *starget;  	struct scsi_target *found_target; -	int error; +	int error, ref_got;  	starget = kzalloc(size, GFP_KERNEL);  	if (!starget) { @@ -401,7 +433,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,  	}  	dev = &starget->dev;  	device_initialize(dev); -	starget->reap_ref = 1; +	kref_init(&starget->reap_ref);  	dev->parent = get_device(parent);  	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);  	dev->bus = &scsi_bus_type; @@ -441,29 +473,36 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,  	return starget;   found: -	found_target->reap_ref++; +	/* +	 * release routine already fired if kref is zero, so if we can still +	 * take the reference, the target must be alive.  If we can't, it must +	 * be dying and we need to wait for a new target +	 */ +	ref_got = kref_get_unless_zero(&found_target->reap_ref); +  	spin_unlock_irqrestore(shost->host_lock, flags); -	if (found_target->state != STARGET_DEL) { +	if (ref_got) {  		put_device(dev);  		return found_target;  	} -	/* Unfortunately, we found a dying target; need to -	 * wait until it's dead before we can get a new one */ +	/* +	 * Unfortunately, we found a dying target; need to wait until it's +	 * dead before we can get a new one.  There is an anomaly here.  We +	 * *should* call scsi_target_reap() to balance the kref_get() of the +	 * reap_ref above.  However, since the target is being released, it's +	 * already invisible and the reap_ref is irrelevant.  If we call +	 * scsi_target_reap() we might spuriously do another device_del() on +	 * an already invisible target. +	 */  	put_device(&found_target->dev); -	flush_scheduled_work(); +	/* +	 * length of time is irrelevant here, we just want to yield the CPU +	 * for a tick to avoid busy waiting for the target to die.
+	 */ +	msleep(1); +	goto retry;  } -static void scsi_target_reap_usercontext(struct work_struct *work) -{ -	struct scsi_target *starget = -		container_of(work, struct scsi_target, ew.work); - -	transport_remove_device(&starget->dev); -	device_del(&starget->dev); -	scsi_target_destroy(starget); -} -  /**   * scsi_target_reap - check to see if target is in use and destroy if not   * @starget: target to be checked @@ -474,28 +513,13 @@ static void scsi_target_reap_usercontext(struct work_struct *work)   */  void scsi_target_reap(struct scsi_target *starget)  { -	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); -	unsigned long flags; -	enum scsi_target_state state; -	int empty = 0; - -	spin_lock_irqsave(shost->host_lock, flags); -	state = starget->state; -	if (--starget->reap_ref == 0 && list_empty(&starget->devices)) { -		empty = 1; -		starget->state = STARGET_DEL; -	} -	spin_unlock_irqrestore(shost->host_lock, flags); - -	if (!empty) -		return; - -	BUG_ON(state == STARGET_DEL); -	if (state == STARGET_CREATED) -		scsi_target_destroy(starget); -	else -		execute_in_process_context(scsi_target_reap_usercontext, -					   &starget->ew); +	/* +	 * serious problem if this triggers: STARGET_DEL is only set if +	 * the reap_ref drops to zero, so we're trying to do another final put +	 * on an already released kref +	 */ +	BUG_ON(starget->state == STARGET_DEL); +	scsi_target_reap_ref_put(starget);  }  /** @@ -946,6 +970,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,  		}  	} +	if (sdev->scsi_level >= SCSI_3) +		scsi_attach_vpd(sdev); +  	sdev->max_queue_depth = sdev->queue_depth;  	/* @@ -1532,6 +1559,10 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,  	}  	mutex_unlock(&shost->scan_mutex);  	scsi_autopm_put_target(starget); +	/* +	 * paired with scsi_alloc_target().  
Target will be destroyed unless +	 * scsi_probe_and_add_lun made an underlying device visible +	 */  	scsi_target_reap(starget);  	put_device(&starget->dev); @@ -1612,8 +1643,10 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,   out_reap:  	scsi_autopm_put_target(starget); -	/* now determine if the target has any children at all -	 * and if not, nuke it */ +	/* +	 * paired with scsi_alloc_target(): determine if the target has +	 * any children at all and if not, nuke it +	 */  	scsi_target_reap(starget);  	put_device(&starget->dev); diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c index 2b6b93f7d8e..546f16299ef 100644 --- a/drivers/scsi/scsi_sysctl.c +++ b/drivers/scsi/scsi_sysctl.c @@ -12,7 +12,7 @@  #include "scsi_priv.h" -static ctl_table scsi_table[] = { +static struct ctl_table scsi_table[] = {  	{ .procname	= "logging_level",  	  .data		= &scsi_logging_level,  	  .maxlen	= sizeof(scsi_logging_level), @@ -21,14 +21,14 @@ static ctl_table scsi_table[] = {  	{ }  }; -static ctl_table scsi_dir_table[] = { +static struct ctl_table scsi_dir_table[] = {  	{ .procname	= "scsi",  	  .mode		= 0555,  	  .child	= scsi_table },  	{ }  }; -static ctl_table scsi_root_table[] = { +static struct ctl_table scsi_root_table[] = {  	{ .procname	= "dev",  	  .mode		= 0555,  	  .child	= scsi_dir_table }, diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 40c639491b2..074e8cc3095 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -281,6 +281,58 @@ exit_store_host_reset:  static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); +static ssize_t +show_shost_eh_deadline(struct device *dev, +		      struct device_attribute *attr, char *buf) +{ +	struct Scsi_Host *shost = class_to_shost(dev); + +	if (shost->eh_deadline == -1) +		return snprintf(buf, strlen("off") + 2, "off\n"); +	return sprintf(buf, "%u\n", shost->eh_deadline / HZ); +} + +static ssize_t +store_shost_eh_deadline(struct device *dev, struct device_attribute *attr, +		const char *buf, size_t count) +{ +	struct Scsi_Host *shost = class_to_shost(dev); +	int ret = -EINVAL; +	unsigned long deadline, flags; + +	if (shost->transportt && +	    (shost->transportt->eh_strategy_handler || +	     !shost->hostt->eh_host_reset_handler)) +		return ret; + +	if (!strncmp(buf, "off", strlen("off"))) +		deadline = -1; +	else { +		ret = kstrtoul(buf, 10, &deadline); +		if (ret) +			return ret; +		if (deadline * HZ > UINT_MAX) +			return -EINVAL; +	} + +	spin_lock_irqsave(shost->host_lock, flags); +	if (scsi_host_in_recovery(shost)) +		ret = -EBUSY; +	else { +		if (deadline == -1) +			shost->eh_deadline = -1; +		else +			shost->eh_deadline = deadline * HZ; + +		ret = count; +	} +	spin_unlock_irqrestore(shost->host_lock, flags); + +	return ret; +} + +static DEVICE_ATTR(eh_deadline, S_IRUGO | S_IWUSR, show_shost_eh_deadline, store_shost_eh_deadline); +  shost_rd_attr(unique_id, "%u\n");  shost_rd_attr(host_busy, "%hu\n");  shost_rd_attr(cmd_per_lun, "%hd\n"); @@ -308,6 +360,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {  	&dev_attr_prot_capabilities.attr,  	&dev_attr_prot_guard_type.attr,  	&dev_attr_host_reset.attr, +	&dev_attr_eh_deadline.attr,  	NULL  }; @@ -332,17 +385,14 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)  {  	struct scsi_device *sdev;  	struct device *parent; -	struct scsi_target *starget;  	struct list_head *this, *tmp;  	unsigned long flags;  	sdev = container_of(work, struct scsi_device, ew.work);  	parent = 
sdev->sdev_gendev.parent; -	starget = to_scsi_target(parent);  	spin_lock_irqsave(sdev->host->host_lock, flags); -	starget->reap_ref++;  	list_del(&sdev->siblings);  	list_del(&sdev->same_target_siblings);  	list_del(&sdev->starved_entry); @@ -362,8 +412,8 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)  	/* NULL queue means the device can't be used */  	sdev->request_queue = NULL; -	scsi_target_reap(scsi_target(sdev)); - +	kfree(sdev->vpd_pg83); +	kfree(sdev->vpd_pg80);  	kfree(sdev->inquiry);  	kfree(sdev); @@ -528,7 +578,7 @@ static int scsi_sdev_check_buf_bit(const char *buf)   * Create the actual show/store functions and data structures.   */  sdev_rd_attr (device_blocked, "%d\n"); -sdev_rd_attr (queue_depth, "%d\n"); +sdev_rd_attr (device_busy, "%d\n");  sdev_rd_attr (type, "%d\n");  sdev_rd_attr (scsi_level, "%d\n");  sdev_rd_attr (vendor, "%.8s\n"); @@ -597,23 +647,12 @@ store_rescan_field (struct device *dev, struct device_attribute *attr,  }  static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); -static void sdev_store_delete_callback(struct device *dev) -{ -	scsi_remove_device(to_scsi_device(dev)); -} -  static ssize_t  sdev_store_delete(struct device *dev, struct device_attribute *attr,  		  const char *buf, size_t count)  { -	int rc; - -	/* An attribute cannot be unregistered by one of its own methods, -	 * so we have to use this roundabout approach. -	 */ -	rc = device_schedule_callback(dev, sdev_store_delete_callback); -	if (rc) -		count = rc; +	if (device_remove_file_self(dev, attr)) +		scsi_remove_device(to_scsi_device(dev));  	return count;  };  static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); @@ -671,10 +710,64 @@ show_queue_type_field(struct device *dev, struct device_attribute *attr,  	return snprintf(buf, 20, "%s\n", name);  } -static DEVICE_ATTR(queue_type, S_IRUGO, show_queue_type_field, NULL); +static ssize_t +store_queue_type_field(struct device *dev, struct device_attribute *attr, +		       const char *buf, size_t count) +{ +	struct scsi_device *sdev = to_scsi_device(dev); +	struct scsi_host_template *sht = sdev->host->hostt; +	int tag_type = 0, retval; +	int prev_tag_type = scsi_get_tag_type(sdev); + +	if (!sdev->tagged_supported || !sht->change_queue_type) +		return -EINVAL; + +	if (strncmp(buf, "ordered", 7) == 0) +		tag_type = MSG_ORDERED_TAG; +	else if (strncmp(buf, "simple", 6) == 0) +		tag_type = MSG_SIMPLE_TAG; +	else if (strncmp(buf, "none", 4) != 0) +		return -EINVAL; + +	if (tag_type == prev_tag_type) +		return count; + +	retval = sht->change_queue_type(sdev, tag_type); +	if (retval < 0) +		return retval; + +	return count; +} + +static DEVICE_ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, +		   store_queue_type_field); + +#define sdev_vpd_pg_attr(_page)						\ +static ssize_t							\ +show_vpd_##_page(struct file *filp, struct kobject *kobj,	\ +		 struct bin_attribute *bin_attr,			\ +		 char *buf, loff_t off, size_t count)			\ +{									\ +	struct device *dev = container_of(kobj, struct device, kobj);	\ +	struct scsi_device *sdev = to_scsi_device(dev);			\ +	if (!sdev->vpd_##_page)						\ +		return -EINVAL;						\ +	return memory_read_from_buffer(buf, count, &off,		\ +				       sdev->vpd_##_page,		\ +				       sdev->vpd_##_page##_len);	\ +}									\ +static struct bin_attribute dev_attr_vpd_##_page = {		\ +	.attr =	{.name = __stringify(vpd_##_page), .mode = S_IRUGO },	\ +	.size = 0,							\ +	.read = show_vpd_##_page,					\ +}; + +sdev_vpd_pg_attr(pg83); +sdev_vpd_pg_attr(pg80);  static 
ssize_t -show_iostat_counterbits(struct device *dev, struct device_attribute *attr, 				char *buf) +show_iostat_counterbits(struct device *dev, struct device_attribute *attr, +			char *buf)  {  	return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);  } @@ -745,45 +838,9 @@ DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)  DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)  DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED) -/* Default template for device attributes.  May NOT be modified */ -static struct attribute *scsi_sdev_attrs[] = { -	&dev_attr_device_blocked.attr, -	&dev_attr_type.attr, -	&dev_attr_scsi_level.attr, -	&dev_attr_vendor.attr, -	&dev_attr_model.attr, -	&dev_attr_rev.attr, -	&dev_attr_rescan.attr, -	&dev_attr_delete.attr, -	&dev_attr_state.attr, -	&dev_attr_timeout.attr, -	&dev_attr_eh_timeout.attr, -	&dev_attr_iocounterbits.attr, -	&dev_attr_iorequest_cnt.attr, -	&dev_attr_iodone_cnt.attr, -	&dev_attr_ioerr_cnt.attr, -	&dev_attr_modalias.attr, -	REF_EVT(media_change), -	REF_EVT(inquiry_change_reported), -	REF_EVT(capacity_change_reported), -	REF_EVT(soft_threshold_reached), -	REF_EVT(mode_parameter_change_reported), -	REF_EVT(lun_change_reported), -	NULL -}; - -static struct attribute_group scsi_sdev_attr_group = { -	.attrs =	scsi_sdev_attrs, -}; - -static const struct attribute_group *scsi_sdev_attr_groups[] = { -	&scsi_sdev_attr_group, -	NULL -}; -  static ssize_t -sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr, -			  const char *buf, size_t count) +sdev_store_queue_depth(struct device *dev, struct device_attribute *attr, +		       const char *buf, size_t count)  {  	int depth, retval;  	struct scsi_device *sdev = to_scsi_device(dev); @@ -806,10 +863,10 @@ sdev_store_queue_depth_rw(struct device *dev, struct device_attribute *attr,  	return count;  } +sdev_show_function(queue_depth, "%d\n"); -static struct device_attribute sdev_attr_queue_depth_rw = -	__ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, -	       sdev_store_queue_depth_rw); +static DEVICE_ATTR(queue_depth, S_IRUGO | S_IWUSR, sdev_show_queue_depth, +		   sdev_store_queue_depth);  static ssize_t  sdev_show_queue_ramp_up_period(struct device *dev, @@ -837,40 +894,79 @@ sdev_store_queue_ramp_up_period(struct device *dev,  	return period;  } -static struct device_attribute sdev_attr_queue_ramp_up_period = -	__ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, -	       sdev_show_queue_ramp_up_period, -	       sdev_store_queue_ramp_up_period); +static DEVICE_ATTR(queue_ramp_up_period, S_IRUGO | S_IWUSR, +		   sdev_show_queue_ramp_up_period, +		   sdev_store_queue_ramp_up_period); -static ssize_t -sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr, -			 const char *buf, size_t count) +static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj, +					 struct attribute *attr, int i)  { +	struct device *dev = container_of(kobj, struct device, kobj);  	struct scsi_device *sdev = to_scsi_device(dev); -	struct scsi_host_template *sht = sdev->host->hostt; -	int tag_type = 0, retval; -	int prev_tag_type = scsi_get_tag_type(sdev); -	if (!sdev->tagged_supported || !sht->change_queue_type) -		return -EINVAL; -	if (strncmp(buf, "ordered", 7) == 0) -		tag_type = MSG_ORDERED_TAG; -	else if (strncmp(buf, "simple", 6) == 0) -		tag_type = MSG_SIMPLE_TAG; -	else if (strncmp(buf, "none", 4) != 0) -		return -EINVAL; +	if (attr == &dev_attr_queue_depth.attr && +	    !sdev->host->hostt->change_queue_depth) +		return 
S_IRUGO; -	if (tag_type == prev_tag_type) -		return count; +	if (attr == &dev_attr_queue_ramp_up_period.attr && +	    !sdev->host->hostt->change_queue_depth) +		return 0; -	retval = sht->change_queue_type(sdev, tag_type); -	if (retval < 0) -		return retval; +	if (attr == &dev_attr_queue_type.attr && +	    !sdev->host->hostt->change_queue_type) +		return S_IRUGO; -	return count; +	return attr->mode;  } +/* Default template for device attributes.  May NOT be modified */ +static struct attribute *scsi_sdev_attrs[] = { +	&dev_attr_device_blocked.attr, +	&dev_attr_type.attr, +	&dev_attr_scsi_level.attr, +	&dev_attr_device_busy.attr, +	&dev_attr_vendor.attr, +	&dev_attr_model.attr, +	&dev_attr_rev.attr, +	&dev_attr_rescan.attr, +	&dev_attr_delete.attr, +	&dev_attr_state.attr, +	&dev_attr_timeout.attr, +	&dev_attr_eh_timeout.attr, +	&dev_attr_iocounterbits.attr, +	&dev_attr_iorequest_cnt.attr, +	&dev_attr_iodone_cnt.attr, +	&dev_attr_ioerr_cnt.attr, +	&dev_attr_modalias.attr, +	&dev_attr_queue_depth.attr, +	&dev_attr_queue_type.attr, +	&dev_attr_queue_ramp_up_period.attr, +	REF_EVT(media_change), +	REF_EVT(inquiry_change_reported), +	REF_EVT(capacity_change_reported), +	REF_EVT(soft_threshold_reached), +	REF_EVT(mode_parameter_change_reported), +	REF_EVT(lun_change_reported), +	NULL +}; + +static struct bin_attribute *scsi_sdev_bin_attrs[] = { +	&dev_attr_vpd_pg83, +	&dev_attr_vpd_pg80, +	NULL +}; +static struct attribute_group scsi_sdev_attr_group = { +	.attrs =	scsi_sdev_attrs, +	.bin_attrs =	scsi_sdev_bin_attrs, +	.is_visible =	scsi_sdev_attr_is_visible, +}; + +static const struct attribute_group *scsi_sdev_attr_groups[] = { +	&scsi_sdev_attr_group, +	NULL +}; +  static int scsi_target_add(struct scsi_target *starget)  {  	int error; @@ -893,10 +989,6 @@ static int scsi_target_add(struct scsi_target *starget)  	return 0;  } -static struct device_attribute sdev_attr_queue_type_rw = -	__ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field, -	       sdev_store_queue_type_rw); -  /**   * scsi_sysfs_add_sdev - add scsi device to sysfs   * @sdev:	scsi_device to add @@ -950,25 +1042,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)  	transport_add_device(&sdev->sdev_gendev);  	sdev->is_visible = 1; -	/* create queue files, which may be writable, depending on the host */ -	if (sdev->host->hostt->change_queue_depth) { -		error = device_create_file(&sdev->sdev_gendev, -					   &sdev_attr_queue_depth_rw); -		error = device_create_file(&sdev->sdev_gendev, -					   &sdev_attr_queue_ramp_up_period); -	} -	else -		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth); -	if (error) -		return error; - -	if (sdev->host->hostt->change_queue_type) -		error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw); -	else -		error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type); -	if (error) -		return error; -  	error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);  	if (error) @@ -1018,6 +1091,13 @@ void __scsi_remove_device(struct scsi_device *sdev)  		sdev->host->hostt->slave_destroy(sdev);  	transport_destroy_device(dev); +	/* +	 * Paired with the kref_get() in scsi_sysfs_device_initialize().  We have +	 * removed sysfs visibility from the device, so make the target +	 * invisible if this was the last device underneath it.
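+	 *
+	 * (Illustrative note, not part of the patch: the get side of this
+	 * pairing is the kref_get(&starget->reap_ref) added to
+	 * scsi_sysfs_device_initialize() further down, so the last
+	 * __scsi_remove_device() under a target drops the final reference
+	 * and triggers scsi_target_reap_ref_release().)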
+	 */ +	scsi_target_reap(scsi_target(sdev)); +  	put_device(dev);  } @@ -1080,7 +1160,7 @@ void scsi_remove_target(struct device *dev)  			continue;  		if (starget->dev.parent == dev || &starget->dev == dev) {  			/* assuming new targets arrive at the end */ -			starget->reap_ref++; +			kref_get(&starget->reap_ref);  			spin_unlock_irqrestore(shost->host_lock, flags);  			if (last)  				scsi_target_reap(last); @@ -1164,6 +1244,12 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)  	list_add_tail(&sdev->same_target_siblings, &starget->devices);  	list_add_tail(&sdev->siblings, &shost->__devices);  	spin_unlock_irqrestore(shost->host_lock, flags); +	/* +	 * device can now only be removed via __scsi_remove_device() so hold +	 * the target.  Target will be held in CREATED state until something +	 * beneath it becomes visible (in which case it moves to RUNNING) +	 */ +	kref_get(&starget->reap_ref);  }  int scsi_is_sdev_device(const struct device *dev) diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 84a1fdf6786..e51add05fb8 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c @@ -155,7 +155,8 @@ void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)  	__blk_put_request(q, rq);  	spin_unlock_irqrestore(q->queue_lock, flags); -	__scsi_put_command(shost, cmd, &shost->shost_gendev); +	__scsi_put_command(shost, cmd); +	put_device(&shost->shost_gendev);  }  EXPORT_SYMBOL_GPL(scsi_host_put_command); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 4628fd5e068..521f5838594 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -261,6 +261,7 @@ static const struct {  	{ FC_PORTSPEED_10GBIT,		"10 Gbit" },  	{ FC_PORTSPEED_8GBIT,		"8 Gbit" },  	{ FC_PORTSPEED_16GBIT,		"16 Gbit" }, +	{ FC_PORTSPEED_32GBIT,		"32 Gbit" },  	{ FC_PORTSPEED_NOT_NEGOTIATED,	"Not Negotiated" },  };  fc_bitfield_name_search(port_speed, fc_port_speed_names) @@ -2548,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work)  			fc_flush_devloss(shost);  		if (!cancel_delayed_work(&rport->dev_loss_work))  			fc_flush_devloss(shost); +		cancel_work_sync(&rport->scan_work);  		spin_lock_irqsave(shost->host_lock, flags);  		rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;  	} diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index e4a989fa477..0102a2d70dd 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -305,20 +305,71 @@ show_##type##_##name(struct device *dev, struct device_attribute *attr,	\  	iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param)	\  static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); -/* generic read only ipvi4 attribute */ +#define iscsi_iface_attr(type, name, param)				\ +	iscsi_iface_attr_show(type, name, ISCSI_IFACE_PARAM, param)	\ +static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); + +/* generic read only ipv4 attribute */  iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);  iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);  iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);  iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO); +iscsi_iface_net_attr(ipv4_iface, dhcp_dns_address_en, +		     ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_slp_da_info_en, +		     ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN); 
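Each iscsi_iface_net_attr()/iscsi_iface_attr() invocation here is a one-line attribute definition: per the macro definitions at the top of this hunk, it pastes together a read-only show function plus an ISCSI_IFACE_ATTR declaration. Below is a minimal sketch of what one entry expands to; the generated body is assumed (not shown in this hunk) to forward to the transport's get_iface_param() callback, matching the iscsi_dev_to_iface()/iface->transport pattern used elsewhere in this file.

/* Hypothetical expansion sketch for:
 *   iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL);
 */
static ssize_t
show_ipv4_iface_ttl(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
	struct iscsi_transport *t = iface->transport;

	/* assumed: the transport formats the parameter value into buf */
	return t->get_iface_param(iface, ISCSI_NET_PARAM,
				  ISCSI_NET_PARAM_IPV4_TTL, buf);
}
static ISCSI_IFACE_ATTR(ipv4_iface, ttl, S_IRUGO, show_ipv4_iface_ttl, NULL);

Visibility of the resulting sysfs file is then gated per transport by iscsi_iface_attr_is_visible() later in this file.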
+iscsi_iface_net_attr(ipv4_iface, tos_en, ISCSI_NET_PARAM_IPV4_TOS_EN); +iscsi_iface_net_attr(ipv4_iface, tos, ISCSI_NET_PARAM_IPV4_TOS); +iscsi_iface_net_attr(ipv4_iface, grat_arp_en, +		     ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id_en, +		     ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_alt_client_id, +		     ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID); +iscsi_iface_net_attr(ipv4_iface, dhcp_req_vendor_id_en, +		     ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_use_vendor_id_en, +		     ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN); +iscsi_iface_net_attr(ipv4_iface, dhcp_vendor_id, +		     ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID); +iscsi_iface_net_attr(ipv4_iface, dhcp_learn_iqn_en, +		     ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN); +iscsi_iface_net_attr(ipv4_iface, fragment_disable, +		     ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE); +iscsi_iface_net_attr(ipv4_iface, incoming_forwarding_en, +		     ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN); +iscsi_iface_net_attr(ipv4_iface, ttl, ISCSI_NET_PARAM_IPV4_TTL);  /* generic read only ipv6 attribute */  iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR); -iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL); +iscsi_iface_net_attr(ipv6_iface, link_local_addr, +		     ISCSI_NET_PARAM_IPV6_LINKLOCAL);  iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER);  iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg,  		     ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG);  iscsi_iface_net_attr(ipv6_iface, link_local_autocfg,  		     ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG); +iscsi_iface_net_attr(ipv6_iface, link_local_state, +		     ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE); +iscsi_iface_net_attr(ipv6_iface, router_state, +		     ISCSI_NET_PARAM_IPV6_ROUTER_STATE); +iscsi_iface_net_attr(ipv6_iface, grat_neighbor_adv_en, +		     ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN); +iscsi_iface_net_attr(ipv6_iface, mld_en, ISCSI_NET_PARAM_IPV6_MLD_EN); +iscsi_iface_net_attr(ipv6_iface, flow_label, ISCSI_NET_PARAM_IPV6_FLOW_LABEL); +iscsi_iface_net_attr(ipv6_iface, traffic_class, +		     ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS); +iscsi_iface_net_attr(ipv6_iface, hop_limit, ISCSI_NET_PARAM_IPV6_HOP_LIMIT); +iscsi_iface_net_attr(ipv6_iface, nd_reachable_tmo, +		     ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO); +iscsi_iface_net_attr(ipv6_iface, nd_rexmit_time, +		     ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME); +iscsi_iface_net_attr(ipv6_iface, nd_stale_tmo, +		     ISCSI_NET_PARAM_IPV6_ND_STALE_TMO); +iscsi_iface_net_attr(ipv6_iface, dup_addr_detect_cnt, +		     ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT); +iscsi_iface_net_attr(ipv6_iface, router_adv_link_mtu, +		     ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU);  /* common read only iface attribute */  iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE); @@ -327,6 +378,40 @@ iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY);  iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);  iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);  iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT); +iscsi_iface_net_attr(iface, ipaddress_state, ISCSI_NET_PARAM_IPADDR_STATE); +iscsi_iface_net_attr(iface, delayed_ack_en, ISCSI_NET_PARAM_DELAYED_ACK_EN); +iscsi_iface_net_attr(iface, tcp_nagle_disable, +		     ISCSI_NET_PARAM_TCP_NAGLE_DISABLE); +iscsi_iface_net_attr(iface, tcp_wsf_disable, ISCSI_NET_PARAM_TCP_WSF_DISABLE); 
+iscsi_iface_net_attr(iface, tcp_wsf, ISCSI_NET_PARAM_TCP_WSF); +iscsi_iface_net_attr(iface, tcp_timer_scale, ISCSI_NET_PARAM_TCP_TIMER_SCALE); +iscsi_iface_net_attr(iface, tcp_timestamp_en, ISCSI_NET_PARAM_TCP_TIMESTAMP_EN); +iscsi_iface_net_attr(iface, cache_id, ISCSI_NET_PARAM_CACHE_ID); +iscsi_iface_net_attr(iface, redirect_en, ISCSI_NET_PARAM_REDIRECT_EN); + +/* common iscsi specific settings attributes */ +iscsi_iface_attr(iface, def_taskmgmt_tmo, ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO); +iscsi_iface_attr(iface, header_digest, ISCSI_IFACE_PARAM_HDRDGST_EN); +iscsi_iface_attr(iface, data_digest, ISCSI_IFACE_PARAM_DATADGST_EN); +iscsi_iface_attr(iface, immediate_data, ISCSI_IFACE_PARAM_IMM_DATA_EN); +iscsi_iface_attr(iface, initial_r2t, ISCSI_IFACE_PARAM_INITIAL_R2T_EN); +iscsi_iface_attr(iface, data_seq_in_order, +		 ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN); +iscsi_iface_attr(iface, data_pdu_in_order, ISCSI_IFACE_PARAM_PDU_INORDER_EN); +iscsi_iface_attr(iface, erl, ISCSI_IFACE_PARAM_ERL); +iscsi_iface_attr(iface, max_recv_dlength, ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH); +iscsi_iface_attr(iface, first_burst_len, ISCSI_IFACE_PARAM_FIRST_BURST); +iscsi_iface_attr(iface, max_outstanding_r2t, ISCSI_IFACE_PARAM_MAX_R2T); +iscsi_iface_attr(iface, max_burst_len, ISCSI_IFACE_PARAM_MAX_BURST); +iscsi_iface_attr(iface, chap_auth, ISCSI_IFACE_PARAM_CHAP_AUTH_EN); +iscsi_iface_attr(iface, bidi_chap, ISCSI_IFACE_PARAM_BIDI_CHAP_EN); +iscsi_iface_attr(iface, discovery_auth_optional, +		 ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL); +iscsi_iface_attr(iface, discovery_logout, +		 ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN); +iscsi_iface_attr(iface, strict_login_comp_en, +		 ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN); +iscsi_iface_attr(iface, initiator_name, ISCSI_IFACE_PARAM_INITIATOR_NAME);  static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,  					  struct attribute *attr, int i) @@ -335,6 +420,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,  	struct iscsi_iface *iface = iscsi_dev_to_iface(dev);  	struct iscsi_transport *t = iface->transport;  	int param; +	int param_type;  	if (attr == &dev_attr_iface_enabled.attr)  		param = ISCSI_NET_PARAM_IFACE_ENABLE; @@ -348,6 +434,60 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,  		param = ISCSI_NET_PARAM_MTU;  	else if (attr == &dev_attr_iface_port.attr)  		param = ISCSI_NET_PARAM_PORT; +	else if (attr == &dev_attr_iface_ipaddress_state.attr) +		param = ISCSI_NET_PARAM_IPADDR_STATE; +	else if (attr == &dev_attr_iface_delayed_ack_en.attr) +		param = ISCSI_NET_PARAM_DELAYED_ACK_EN; +	else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) +		param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; +	else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) +		param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; +	else if (attr == &dev_attr_iface_tcp_wsf.attr) +		param = ISCSI_NET_PARAM_TCP_WSF; +	else if (attr == &dev_attr_iface_tcp_timer_scale.attr) +		param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; +	else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) +		param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; +	else if (attr == &dev_attr_iface_cache_id.attr) +		param = ISCSI_NET_PARAM_CACHE_ID; +	else if (attr == &dev_attr_iface_redirect_en.attr) +		param = ISCSI_NET_PARAM_REDIRECT_EN; +	else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) +		param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; +	else if (attr == &dev_attr_iface_header_digest.attr) +		param = ISCSI_IFACE_PARAM_HDRDGST_EN; +	else if (attr == &dev_attr_iface_data_digest.attr) +		param = 
ISCSI_IFACE_PARAM_DATADGST_EN; +	else if (attr == &dev_attr_iface_immediate_data.attr) +		param = ISCSI_IFACE_PARAM_IMM_DATA_EN; +	else if (attr == &dev_attr_iface_initial_r2t.attr) +		param = ISCSI_IFACE_PARAM_INITIAL_R2T_EN; +	else if (attr == &dev_attr_iface_data_seq_in_order.attr) +		param = ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN; +	else if (attr == &dev_attr_iface_data_pdu_in_order.attr) +		param = ISCSI_IFACE_PARAM_PDU_INORDER_EN; +	else if (attr == &dev_attr_iface_erl.attr) +		param = ISCSI_IFACE_PARAM_ERL; +	else if (attr == &dev_attr_iface_max_recv_dlength.attr) +		param = ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH; +	else if (attr == &dev_attr_iface_first_burst_len.attr) +		param = ISCSI_IFACE_PARAM_FIRST_BURST; +	else if (attr == &dev_attr_iface_max_outstanding_r2t.attr) +		param = ISCSI_IFACE_PARAM_MAX_R2T; +	else if (attr == &dev_attr_iface_max_burst_len.attr) +		param = ISCSI_IFACE_PARAM_MAX_BURST; +	else if (attr == &dev_attr_iface_chap_auth.attr) +		param = ISCSI_IFACE_PARAM_CHAP_AUTH_EN; +	else if (attr == &dev_attr_iface_bidi_chap.attr) +		param = ISCSI_IFACE_PARAM_BIDI_CHAP_EN; +	else if (attr == &dev_attr_iface_discovery_auth_optional.attr) +		param = ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL; +	else if (attr == &dev_attr_iface_discovery_logout.attr) +		param = ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN; +	else if (attr == &dev_attr_iface_strict_login_comp_en.attr) +		param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; +	else if (attr == &dev_attr_iface_initiator_name.attr) +		param = ISCSI_IFACE_PARAM_INITIATOR_NAME;  	else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {  		if (attr == &dev_attr_ipv4_iface_ipaddress.attr)  			param = ISCSI_NET_PARAM_IPV4_ADDR; @@ -357,6 +497,42 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,  			param = ISCSI_NET_PARAM_IPV4_SUBNET;  		else if (attr == &dev_attr_ipv4_iface_bootproto.attr)  			param = ISCSI_NET_PARAM_IPV4_BOOTPROTO; +		else if (attr == +			 &dev_attr_ipv4_iface_dhcp_dns_address_en.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_DNS_ADDR_EN; +		else if (attr == +			 &dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_SLP_DA_EN; +		else if (attr == &dev_attr_ipv4_iface_tos_en.attr) +			param = ISCSI_NET_PARAM_IPV4_TOS_EN; +		else if (attr == &dev_attr_ipv4_iface_tos.attr) +			param = ISCSI_NET_PARAM_IPV4_TOS; +		else if (attr == &dev_attr_ipv4_iface_grat_arp_en.attr) +			param = ISCSI_NET_PARAM_IPV4_GRAT_ARP_EN; +		else if (attr == +			 &dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID_EN; +		else if (attr == &dev_attr_ipv4_iface_dhcp_alt_client_id.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_ALT_CLIENT_ID; +		else if (attr == +			 &dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_REQ_VENDOR_ID_EN; +		else if (attr == +			 &dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_USE_VENDOR_ID_EN; +		else if (attr == &dev_attr_ipv4_iface_dhcp_vendor_id.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_VENDOR_ID; +		else if (attr == +			 &dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr) +			param = ISCSI_NET_PARAM_IPV4_DHCP_LEARN_IQN_EN; +		else if (attr == +			 &dev_attr_ipv4_iface_fragment_disable.attr) +			param = ISCSI_NET_PARAM_IPV4_FRAGMENT_DISABLE; +		else if (attr == +			 &dev_attr_ipv4_iface_incoming_forwarding_en.attr) +			param = ISCSI_NET_PARAM_IPV4_IN_FORWARD_EN; +		else if (attr == &dev_attr_ipv4_iface_ttl.attr) +			param = ISCSI_NET_PARAM_IPV4_TTL;  		else  			return 0;  	} 
else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) { @@ -370,6 +546,31 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,  			param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG;  		else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr)  			param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG; +		else if (attr == &dev_attr_ipv6_iface_link_local_state.attr) +			param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_STATE; +		else if (attr == &dev_attr_ipv6_iface_router_state.attr) +			param = ISCSI_NET_PARAM_IPV6_ROUTER_STATE; +		else if (attr == +			 &dev_attr_ipv6_iface_grat_neighbor_adv_en.attr) +			param = ISCSI_NET_PARAM_IPV6_GRAT_NEIGHBOR_ADV_EN; +		else if (attr == &dev_attr_ipv6_iface_mld_en.attr) +			param = ISCSI_NET_PARAM_IPV6_MLD_EN; +		else if (attr == &dev_attr_ipv6_iface_flow_label.attr) +			param = ISCSI_NET_PARAM_IPV6_FLOW_LABEL; +		else if (attr == &dev_attr_ipv6_iface_traffic_class.attr) +			param = ISCSI_NET_PARAM_IPV6_TRAFFIC_CLASS; +		else if (attr == &dev_attr_ipv6_iface_hop_limit.attr) +			param = ISCSI_NET_PARAM_IPV6_HOP_LIMIT; +		else if (attr == &dev_attr_ipv6_iface_nd_reachable_tmo.attr) +			param = ISCSI_NET_PARAM_IPV6_ND_REACHABLE_TMO; +		else if (attr == &dev_attr_ipv6_iface_nd_rexmit_time.attr) +			param = ISCSI_NET_PARAM_IPV6_ND_REXMIT_TIME; +		else if (attr == &dev_attr_ipv6_iface_nd_stale_tmo.attr) +			param = ISCSI_NET_PARAM_IPV6_ND_STALE_TMO; +		else if (attr == &dev_attr_ipv6_iface_dup_addr_detect_cnt.attr) +			param = ISCSI_NET_PARAM_IPV6_DUP_ADDR_DETECT_CNT; +		else if (attr == &dev_attr_ipv6_iface_router_adv_link_mtu.attr) +			param = ISCSI_NET_PARAM_IPV6_RTR_ADV_LINK_MTU;  		else  			return 0;  	} else { @@ -377,7 +578,32 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,  		return 0;  	} -	return t->attr_is_visible(ISCSI_NET_PARAM, param); +	switch (param) { +	case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: +	case ISCSI_IFACE_PARAM_HDRDGST_EN: +	case ISCSI_IFACE_PARAM_DATADGST_EN: +	case ISCSI_IFACE_PARAM_IMM_DATA_EN: +	case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: +	case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: +	case ISCSI_IFACE_PARAM_PDU_INORDER_EN: +	case ISCSI_IFACE_PARAM_ERL: +	case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: +	case ISCSI_IFACE_PARAM_FIRST_BURST: +	case ISCSI_IFACE_PARAM_MAX_R2T: +	case ISCSI_IFACE_PARAM_MAX_BURST: +	case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: +	case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: +	case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: +	case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: +	case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: +	case ISCSI_IFACE_PARAM_INITIATOR_NAME: +		param_type = ISCSI_IFACE_PARAM; +		break; +	default: +		param_type = ISCSI_NET_PARAM; +	} + +	return t->attr_is_visible(param_type, param);  }  static struct attribute *iscsi_iface_attrs[] = { @@ -396,6 +622,59 @@ static struct attribute *iscsi_iface_attrs[] = {  	&dev_attr_ipv6_iface_link_local_autocfg.attr,  	&dev_attr_iface_mtu.attr,  	&dev_attr_iface_port.attr, +	&dev_attr_iface_ipaddress_state.attr, +	&dev_attr_iface_delayed_ack_en.attr, +	&dev_attr_iface_tcp_nagle_disable.attr, +	&dev_attr_iface_tcp_wsf_disable.attr, +	&dev_attr_iface_tcp_wsf.attr, +	&dev_attr_iface_tcp_timer_scale.attr, +	&dev_attr_iface_tcp_timestamp_en.attr, +	&dev_attr_iface_cache_id.attr, +	&dev_attr_iface_redirect_en.attr, +	&dev_attr_iface_def_taskmgmt_tmo.attr, +	&dev_attr_iface_header_digest.attr, +	&dev_attr_iface_data_digest.attr, +	&dev_attr_iface_immediate_data.attr, +	&dev_attr_iface_initial_r2t.attr, +	&dev_attr_iface_data_seq_in_order.attr, +	
&dev_attr_iface_data_pdu_in_order.attr, +	&dev_attr_iface_erl.attr, +	&dev_attr_iface_max_recv_dlength.attr, +	&dev_attr_iface_first_burst_len.attr, +	&dev_attr_iface_max_outstanding_r2t.attr, +	&dev_attr_iface_max_burst_len.attr, +	&dev_attr_iface_chap_auth.attr, +	&dev_attr_iface_bidi_chap.attr, +	&dev_attr_iface_discovery_auth_optional.attr, +	&dev_attr_iface_discovery_logout.attr, +	&dev_attr_iface_strict_login_comp_en.attr, +	&dev_attr_iface_initiator_name.attr, +	&dev_attr_ipv4_iface_dhcp_dns_address_en.attr, +	&dev_attr_ipv4_iface_dhcp_slp_da_info_en.attr, +	&dev_attr_ipv4_iface_tos_en.attr, +	&dev_attr_ipv4_iface_tos.attr, +	&dev_attr_ipv4_iface_grat_arp_en.attr, +	&dev_attr_ipv4_iface_dhcp_alt_client_id_en.attr, +	&dev_attr_ipv4_iface_dhcp_alt_client_id.attr, +	&dev_attr_ipv4_iface_dhcp_req_vendor_id_en.attr, +	&dev_attr_ipv4_iface_dhcp_use_vendor_id_en.attr, +	&dev_attr_ipv4_iface_dhcp_vendor_id.attr, +	&dev_attr_ipv4_iface_dhcp_learn_iqn_en.attr, +	&dev_attr_ipv4_iface_fragment_disable.attr, +	&dev_attr_ipv4_iface_incoming_forwarding_en.attr, +	&dev_attr_ipv4_iface_ttl.attr, +	&dev_attr_ipv6_iface_link_local_state.attr, +	&dev_attr_ipv6_iface_router_state.attr, +	&dev_attr_ipv6_iface_grat_neighbor_adv_en.attr, +	&dev_attr_ipv6_iface_mld_en.attr, +	&dev_attr_ipv6_iface_flow_label.attr, +	&dev_attr_ipv6_iface_traffic_class.attr, +	&dev_attr_ipv6_iface_hop_limit.attr, +	&dev_attr_ipv6_iface_nd_reachable_tmo.attr, +	&dev_attr_ipv6_iface_nd_rexmit_time.attr, +	&dev_attr_ipv6_iface_nd_stale_tmo.attr, +	&dev_attr_ipv6_iface_dup_addr_detect_cnt.attr, +	&dev_attr_ipv6_iface_router_adv_link_mtu.attr,  	NULL,  }; @@ -404,6 +683,61 @@ static struct attribute_group iscsi_iface_group = {  	.is_visible = iscsi_iface_attr_is_visible,  }; +/* convert iscsi_ipaddress_state values to ascii string name */ +static const struct { +	enum iscsi_ipaddress_state	value; +	char				*name; +} iscsi_ipaddress_state_names[] = { +	{ISCSI_IPDDRESS_STATE_UNCONFIGURED,	"Unconfigured" }, +	{ISCSI_IPDDRESS_STATE_ACQUIRING,	"Acquiring" }, +	{ISCSI_IPDDRESS_STATE_TENTATIVE,	"Tentative" }, +	{ISCSI_IPDDRESS_STATE_VALID,		"Valid" }, +	{ISCSI_IPDDRESS_STATE_DISABLING,	"Disabling" }, +	{ISCSI_IPDDRESS_STATE_INVALID,		"Invalid" }, +	{ISCSI_IPDDRESS_STATE_DEPRECATED,	"Deprecated" }, +}; + +char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state port_state) +{ +	int i; +	char *state = NULL; + +	for (i = 0; i < ARRAY_SIZE(iscsi_ipaddress_state_names); i++) { +		if (iscsi_ipaddress_state_names[i].value == port_state) { +			state = iscsi_ipaddress_state_names[i].name; +			break; +		} +	} +	return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_ipaddress_state_name); + +/* convert iscsi_router_state values to ascii string name */ +static const struct { +	enum iscsi_router_state	value; +	char			*name; +} iscsi_router_state_names[] = { +	{ISCSI_ROUTER_STATE_UNKNOWN,		"Unknown" }, +	{ISCSI_ROUTER_STATE_ADVERTISED,		"Advertised" }, +	{ISCSI_ROUTER_STATE_MANUAL,		"Manual" }, +	{ISCSI_ROUTER_STATE_STALE,		"Stale" }, +}; + +char *iscsi_get_router_state_name(enum iscsi_router_state router_state) +{ +	int i; +	char *state = NULL; + +	for (i = 0; i < ARRAY_SIZE(iscsi_router_state_names); i++) { +		if (iscsi_router_state_names[i].value == router_state) { +			state = iscsi_router_state_names[i].name; +			break; +		} +	} +	return state; +} +EXPORT_SYMBOL_GPL(iscsi_get_router_state_name); +  struct iscsi_iface *  iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,  		   uint32_t iface_type, uint32_t iface_num, int 
dd_size) @@ -891,7 +1225,7 @@ struct bus_type iscsi_flashnode_bus = {   * Adds a sysfs entry for the flashnode session attributes   *   * Returns: - *  pointer to allocated flashnode sess on sucess + *  pointer to allocated flashnode sess on success   *  %NULL on failure   */  struct iscsi_bus_flash_session * @@ -1089,7 +1423,7 @@ static int iscsi_iter_destroy_flashnode_conn_fn(struct device *dev, void *data)  }  /** - * iscsi_destroy_flashnode_sess - destory flashnode session entry + * iscsi_destroy_flashnode_sess - destroy flashnode session entry   * @fnode_sess: pointer to flashnode session entry to be destroyed   *   * Deletes the flashnode session entry and all children flashnode connection @@ -1119,7 +1453,7 @@ static int iscsi_iter_destroy_flashnode_fn(struct device *dev, void *data)  }  /** - * iscsi_destroy_all_flashnode - destory all flashnode session entries + * iscsi_destroy_all_flashnode - destroy all flashnode session entries   * @shost: pointer to host data   *   * Destroys all the flashnode session entries and all corresponding children @@ -2744,6 +3078,28 @@ exit_get_chap:  	return err;  } +static int iscsi_set_chap(struct iscsi_transport *transport, +			  struct iscsi_uevent *ev, uint32_t len) +{ +	char *data = (char *)ev + sizeof(*ev); +	struct Scsi_Host *shost; +	int err = 0; + +	if (!transport->set_chap) +		return -ENOSYS; + +	shost = scsi_host_lookup(ev->u.set_path.host_no); +	if (!shost) { +		pr_err("%s could not find host no %u\n", +		       __func__, ev->u.set_path.host_no); +		return -ENODEV; +	} + +	err = transport->set_chap(shost, data, len); +	scsi_host_put(shost); +	return err; +} +  static int iscsi_delete_chap(struct iscsi_transport *transport,  			     struct iscsi_uevent *ev)  { @@ -3060,6 +3416,73 @@ exit_logout_sid:  }  static int +iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) +{ +	struct iscsi_uevent *ev = nlmsg_data(nlh); +	struct Scsi_Host *shost = NULL; +	struct iscsi_internal *priv; +	struct sk_buff *skbhost_stats; +	struct nlmsghdr *nlhhost_stats; +	struct iscsi_uevent *evhost_stats; +	int host_stats_size = 0; +	int len, err = 0; +	char *buf; + +	if (!transport->get_host_stats) +		return -EINVAL; + +	priv = iscsi_if_transport_lookup(transport); +	if (!priv) +		return -EINVAL; + +	host_stats_size = sizeof(struct iscsi_offload_host_stats); +	len = nlmsg_total_size(sizeof(*ev) + host_stats_size); + +	shost = scsi_host_lookup(ev->u.get_host_stats.host_no); +	if (!shost) { +		pr_err("%s: failed. 
Could not find host no %u\n", +		       __func__, ev->u.get_host_stats.host_no); +		return -ENODEV; +	} + +	do { +		int actual_size; + +		skbhost_stats = alloc_skb(len, GFP_KERNEL); +		if (!skbhost_stats) { +			pr_err("cannot deliver host stats: OOM\n"); +			err = -ENOMEM; +			goto exit_host_stats; +		} + +		nlhhost_stats = __nlmsg_put(skbhost_stats, 0, 0, 0, +				      (len - sizeof(*nlhhost_stats)), 0); +		evhost_stats = nlmsg_data(nlhhost_stats); +		memset(evhost_stats, 0, sizeof(*evhost_stats)); +		evhost_stats->transport_handle = iscsi_handle(transport); +		evhost_stats->type = nlh->nlmsg_type; +		evhost_stats->u.get_host_stats.host_no = +					ev->u.get_host_stats.host_no; +		buf = (char *)((char *)evhost_stats + sizeof(*evhost_stats)); +		memset(buf, 0, host_stats_size); + +		err = transport->get_host_stats(shost, buf, host_stats_size); + +		actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size); +		skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size)); +		nlhhost_stats->nlmsg_len = actual_size; + +		err = iscsi_multicast_skb(skbhost_stats, ISCSI_NL_GRP_ISCSID, +					  GFP_KERNEL); +	} while (err < 0 && err != -ECONNREFUSED); + +exit_host_stats: +	scsi_host_put(shost); +	return err; +} + + +static int  iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)  {  	int err = 0; @@ -3234,6 +3657,13 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)  	case ISCSI_UEVENT_LOGOUT_FLASHNODE_SID:  		err = iscsi_logout_flashnode_sid(transport, ev);  		break; +	case ISCSI_UEVENT_SET_CHAP: +		err = iscsi_set_chap(transport, ev, +				     nlmsg_attrlen(nlh, sizeof(*ev))); +		break; +	case ISCSI_UEVENT_GET_HOST_STATS: +		err = iscsi_get_host_stats(transport, nlh); +		break;  	default:  		err = -ENOSYS;  		break; @@ -3342,6 +3772,7 @@ iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL);  iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);  iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);  iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF); +iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);  #define iscsi_conn_ep_attr_show(param)					\ @@ -3411,6 +3842,7 @@ static struct attribute *iscsi_conn_attrs[] = {  	&dev_attr_conn_is_fw_assigned_ipv6.attr,  	&dev_attr_conn_tcp_xmit_wsf.attr,  	&dev_attr_conn_tcp_recv_wsf.attr, +	&dev_attr_conn_local_ipaddr.attr,  	NULL,  }; @@ -3480,6 +3912,8 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,  		param = ISCSI_PARAM_TCP_XMIT_WSF;  	else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)  		param = ISCSI_PARAM_TCP_RECV_WSF; +	else if (attr == &dev_attr_conn_local_ipaddr.attr) +		param = ISCSI_PARAM_LOCAL_IPADDR;  	else {  		WARN_ONCE(1, "Invalid conn attr");  		return 0; diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 1b681427dde..c341f855fad 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)  	list_del(&rphy->list);  	mutex_unlock(&sas_host->lock); -	sas_bsg_remove(shost, rphy); -  	transport_destroy_device(dev);  	put_device(dev); @@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)  	}  	sas_rphy_unlink(rphy); +	sas_bsg_remove(NULL, rphy);  	transport_remove_device(dev);  	device_del(dev);  } diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index f379c7f3034..13e898332e4 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -24,12 
+24,15 @@  #include <linux/err.h>  #include <linux/slab.h>  #include <linux/string.h> +#include <linux/delay.h>  #include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h>  #include <scsi/scsi_device.h>  #include <scsi/scsi_host.h>  #include <scsi/scsi_transport.h>  #include <scsi/scsi_transport_srp.h> +#include "scsi_priv.h"  #include "scsi_transport_srp_internal.h"  struct srp_host_attrs { @@ -38,7 +41,7 @@ struct srp_host_attrs {  #define to_srp_host_attrs(host)	((struct srp_host_attrs *)(host)->shost_data)  #define SRP_HOST_ATTRS 0 -#define SRP_RPORT_ATTRS 3 +#define SRP_RPORT_ATTRS 8  struct srp_internal {  	struct scsi_transport_template t; @@ -54,6 +57,43 @@ struct srp_internal {  #define	dev_to_rport(d)	container_of(d, struct srp_rport, dev)  #define transport_class_to_srp_rport(dev) dev_to_rport((dev)->parent) +static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r) +{ +	return dev_to_shost(r->dev.parent); +} + +/** + * srp_tmo_valid() - check timeout combination validity + * @reconnect_delay: Reconnect delay in seconds. + * @fast_io_fail_tmo: Fast I/O fail timeout in seconds. + * @dev_loss_tmo: Device loss timeout in seconds. + * + * The combination of the timeout parameters must be such that SCSI commands + * are finished in a reasonable time. Hence do not allow the fast I/O fail + * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to + * exceed that limit if failing I/O fast has been disabled. Furthermore, these + * parameters must be such that multipath can detect failed paths timely. + * Hence do not allow all three parameters to be disabled simultaneously. + */ +int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, int dev_loss_tmo) +{ +	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0) +		return -EINVAL; +	if (reconnect_delay == 0) +		return -EINVAL; +	if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) +		return -EINVAL; +	if (fast_io_fail_tmo < 0 && +	    dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT) +		return -EINVAL; +	if (dev_loss_tmo >= LONG_MAX / HZ) +		return -EINVAL; +	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 && +	    fast_io_fail_tmo >= dev_loss_tmo) +		return -EINVAL; +	return 0; +} +EXPORT_SYMBOL_GPL(srp_tmo_valid);  static int srp_host_setup(struct transport_container *tc, struct device *dev,  			  struct device *cdev) @@ -134,6 +174,456 @@ static ssize_t store_srp_rport_delete(struct device *dev,  static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete); +static ssize_t show_srp_rport_state(struct device *dev, +				    struct device_attribute *attr, +				    char *buf) +{ +	static const char *const state_name[] = { +		[SRP_RPORT_RUNNING]	= "running", +		[SRP_RPORT_BLOCKED]	= "blocked", +		[SRP_RPORT_FAIL_FAST]	= "fail-fast", +		[SRP_RPORT_LOST]	= "lost", +	}; +	struct srp_rport *rport = transport_class_to_srp_rport(dev); +	enum srp_rport_state state = rport->state; + +	return sprintf(buf, "%s\n", +		       (unsigned)state < ARRAY_SIZE(state_name) ? +		       state_name[state] : "???"); +} + +static DEVICE_ATTR(state, S_IRUGO, show_srp_rport_state, NULL); + +static ssize_t srp_show_tmo(char *buf, int tmo) +{ +	return tmo >= 0 ? 
sprintf(buf, "%d\n", tmo) : sprintf(buf, "off\n"); +} + +static int srp_parse_tmo(int *tmo, const char *buf) +{ +	int res = 0; + +	if (strncmp(buf, "off", 3) != 0) +		res = kstrtoint(buf, 0, tmo); +	else +		*tmo = -1; + +	return res; +} + +static ssize_t show_reconnect_delay(struct device *dev, +				    struct device_attribute *attr, char *buf) +{ +	struct srp_rport *rport = transport_class_to_srp_rport(dev); + +	return srp_show_tmo(buf, rport->reconnect_delay); +} + +static ssize_t store_reconnect_delay(struct device *dev, +				     struct device_attribute *attr, +				     const char *buf, const size_t count) +{ +	struct srp_rport *rport = transport_class_to_srp_rport(dev); +	int res, delay; + +	res = srp_parse_tmo(&delay, buf); +	if (res) +		goto out; +	res = srp_tmo_valid(delay, rport->fast_io_fail_tmo, +			    rport->dev_loss_tmo); +	if (res) +		goto out; + +	if (rport->reconnect_delay <= 0 && delay > 0 && +	    rport->state != SRP_RPORT_RUNNING) { +		queue_delayed_work(system_long_wq, &rport->reconnect_work, +				   delay * HZ); +	} else if (delay <= 0) { +		cancel_delayed_work(&rport->reconnect_work); +	} +	rport->reconnect_delay = delay; +	res = count; + +out: +	return res; +} + +static DEVICE_ATTR(reconnect_delay, S_IRUGO | S_IWUSR, show_reconnect_delay, +		   store_reconnect_delay); + +static ssize_t show_failed_reconnects(struct device *dev, +				      struct device_attribute *attr, char *buf) +{ +	struct srp_rport *rport = transport_class_to_srp_rport(dev); + +	return sprintf(buf, "%d\n", rport->failed_reconnects); +} + +static DEVICE_ATTR(failed_reconnects, S_IRUGO, show_failed_reconnects, NULL); + +static ssize_t show_srp_rport_fast_io_fail_tmo(struct device *dev, +					       struct device_attribute *attr, +					       char *buf) +{ +	struct srp_rport *rport = transport_class_to_srp_rport(dev); + +	return srp_show_tmo(buf, rport->fast_io_fail_tmo); +} + +static ssize_t store_srp_rport_fast_io_fail_tmo(struct device *dev, +						struct device_attribute *attr, +						const char *buf, size_t count) +{ +	struct srp_rport *rport = transport_class_to_srp_rport(dev); +	int res; +	int fast_io_fail_tmo; + +	res = srp_parse_tmo(&fast_io_fail_tmo, buf); +	if (res) +		goto out; +	res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo, +			    rport->dev_loss_tmo); +	if (res) +		goto out; +	rport->fast_io_fail_tmo = fast_io_fail_tmo; +	res = count; + +out: +	return res; +} + +static DEVICE_ATTR(fast_io_fail_tmo, S_IRUGO | S_IWUSR, +		   show_srp_rport_fast_io_fail_tmo, +		   store_srp_rport_fast_io_fail_tmo); + +static ssize_t show_srp_rport_dev_loss_tmo(struct device *dev, +					   struct device_attribute *attr, +					   char *buf) +{ +	struct srp_rport *rport = transport_class_to_srp_rport(dev); + +	return srp_show_tmo(buf, rport->dev_loss_tmo); +} + +static ssize_t store_srp_rport_dev_loss_tmo(struct device *dev, +					    struct device_attribute *attr, +					    const char *buf, size_t count) +{ +	struct srp_rport *rport = transport_class_to_srp_rport(dev); +	int res; +	int dev_loss_tmo; + +	res = srp_parse_tmo(&dev_loss_tmo, buf); +	if (res) +		goto out; +	res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo, +			    dev_loss_tmo); +	if (res) +		goto out; +	rport->dev_loss_tmo = dev_loss_tmo; +	res = count; + +out: +	return res; +} + +static DEVICE_ATTR(dev_loss_tmo, S_IRUGO | S_IWUSR, +		   show_srp_rport_dev_loss_tmo, +		   store_srp_rport_dev_loss_tmo); + +static int srp_rport_set_state(struct srp_rport *rport, +			       enum srp_rport_state new_state) +{ +	
enum srp_rport_state old_state = rport->state; + +	lockdep_assert_held(&rport->mutex); + +	switch (new_state) { +	case SRP_RPORT_RUNNING: +		switch (old_state) { +		case SRP_RPORT_LOST: +			goto invalid; +		default: +			break; +		} +		break; +	case SRP_RPORT_BLOCKED: +		switch (old_state) { +		case SRP_RPORT_RUNNING: +			break; +		default: +			goto invalid; +		} +		break; +	case SRP_RPORT_FAIL_FAST: +		switch (old_state) { +		case SRP_RPORT_LOST: +			goto invalid; +		default: +			break; +		} +		break; +	case SRP_RPORT_LOST: +		break; +	} +	rport->state = new_state; +	return 0; + +invalid: +	return -EINVAL; +} + +/** + * srp_reconnect_work() - reconnect and schedule a new attempt if necessary + * @work: Work structure used for scheduling this operation. + */ +static void srp_reconnect_work(struct work_struct *work) +{ +	struct srp_rport *rport = container_of(to_delayed_work(work), +					struct srp_rport, reconnect_work); +	struct Scsi_Host *shost = rport_to_shost(rport); +	int delay, res; + +	res = srp_reconnect_rport(rport); +	if (res != 0) { +		shost_printk(KERN_ERR, shost, +			     "reconnect attempt %d failed (%d)\n", +			     ++rport->failed_reconnects, res); +		delay = rport->reconnect_delay * +			min(100, max(1, rport->failed_reconnects - 10)); +		if (delay > 0) +			queue_delayed_work(system_long_wq, +					   &rport->reconnect_work, delay * HZ); +	} +} + +static void __rport_fail_io_fast(struct srp_rport *rport) +{ +	struct Scsi_Host *shost = rport_to_shost(rport); +	struct srp_internal *i; + +	lockdep_assert_held(&rport->mutex); + +	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST)) +		return; +	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); + +	/* Involve the LLD if possible to terminate all I/O on the rport. */ +	i = to_srp_internal(shost->transportt); +	if (i->f->terminate_rport_io) +		i->f->terminate_rport_io(rport); +} + +/** + * rport_fast_io_fail_timedout() - fast I/O failure timeout handler + * @work: Work structure used for scheduling this operation. + */ +static void rport_fast_io_fail_timedout(struct work_struct *work) +{ +	struct srp_rport *rport = container_of(to_delayed_work(work), +					struct srp_rport, fast_io_fail_work); +	struct Scsi_Host *shost = rport_to_shost(rport); + +	pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n", +		dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + +	mutex_lock(&rport->mutex); +	if (rport->state == SRP_RPORT_BLOCKED) +		__rport_fail_io_fast(rport); +	mutex_unlock(&rport->mutex); +} + +/** + * rport_dev_loss_timedout() - device loss timeout handler + * @work: Work structure used for scheduling this operation. 
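+ *
+ * A rough timeline of the escalation these handlers implement (illustrative
+ * sketch only, using the default timeouts set in srp_rport_add() below;
+ * the actual values come from the LLD and the sysfs attributes above):
+ *
+ *	t = 0                 transport fails, rport -> SRP_RPORT_BLOCKED
+ *	t = reconnect_delay   srp_reconnect_work() starts retrying (default 10s)
+ *	t = fast_io_fail_tmo  __rport_fail_io_fast(), I/O fails fast (default 15s)
+ *	t = dev_loss_tmo      this handler: SRP_RPORT_LOST, rport_delete() (default 60s)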
+ */ +static void rport_dev_loss_timedout(struct work_struct *work) +{ +	struct srp_rport *rport = container_of(to_delayed_work(work), +					struct srp_rport, dev_loss_work); +	struct Scsi_Host *shost = rport_to_shost(rport); +	struct srp_internal *i = to_srp_internal(shost->transportt); + +	pr_info("dev_loss_tmo expired for SRP %s / %s.\n", +		dev_name(&rport->dev), dev_name(&shost->shost_gendev)); + +	mutex_lock(&rport->mutex); +	WARN_ON(srp_rport_set_state(rport, SRP_RPORT_LOST) != 0); +	scsi_target_unblock(rport->dev.parent, SDEV_TRANSPORT_OFFLINE); +	mutex_unlock(&rport->mutex); + +	i->f->rport_delete(rport); +} + +static void __srp_start_tl_fail_timers(struct srp_rport *rport) +{ +	struct Scsi_Host *shost = rport_to_shost(rport); +	int delay, fast_io_fail_tmo, dev_loss_tmo; + +	lockdep_assert_held(&rport->mutex); + +	delay = rport->reconnect_delay; +	fast_io_fail_tmo = rport->fast_io_fail_tmo; +	dev_loss_tmo = rport->dev_loss_tmo; +	pr_debug("%s current state: %d\n", dev_name(&shost->shost_gendev), +		 rport->state); + +	if (rport->state == SRP_RPORT_LOST) +		return; +	if (delay > 0) +		queue_delayed_work(system_long_wq, &rport->reconnect_work, +				   1UL * delay * HZ); +	if (srp_rport_set_state(rport, SRP_RPORT_BLOCKED) == 0) { +		pr_debug("%s new state: %d\n", dev_name(&shost->shost_gendev), +			 rport->state); +		scsi_target_block(&shost->shost_gendev); +		if (fast_io_fail_tmo >= 0) +			queue_delayed_work(system_long_wq, +					   &rport->fast_io_fail_work, +					   1UL * fast_io_fail_tmo * HZ); +		if (dev_loss_tmo >= 0) +			queue_delayed_work(system_long_wq, +					   &rport->dev_loss_work, +					   1UL * dev_loss_tmo * HZ); +	} +} + +/** + * srp_start_tl_fail_timers() - start the transport layer failure timers + * @rport: SRP target port. + * + * Start the transport layer fast I/O failure and device loss timers. Do not + * modify a timer that was already started. + */ +void srp_start_tl_fail_timers(struct srp_rport *rport) +{ +	mutex_lock(&rport->mutex); +	__srp_start_tl_fail_timers(rport); +	mutex_unlock(&rport->mutex); +} +EXPORT_SYMBOL(srp_start_tl_fail_timers); + +/** + * scsi_request_fn_active() - number of kernel threads inside scsi_request_fn() + * @shost: SCSI host for which to count the number of scsi_request_fn() callers. + */ +static int scsi_request_fn_active(struct Scsi_Host *shost) +{ +	struct scsi_device *sdev; +	struct request_queue *q; +	int request_fn_active = 0; + +	shost_for_each_device(sdev, shost) { +		q = sdev->request_queue; + +		spin_lock_irq(q->queue_lock); +		request_fn_active += q->request_fn_active; +		spin_unlock_irq(q->queue_lock); +	} + +	return request_fn_active; +} + +/** + * srp_reconnect_rport() - reconnect to an SRP target port + * @rport: SRP target port. + * + * Blocks SCSI command queueing before invoking reconnect() such that + * queuecommand() won't be invoked concurrently with reconnect() from outside + * the SCSI EH. This is important since a reconnect() implementation may + * reallocate resources needed by queuecommand(). + * + * Notes: + * - This function neither waits until outstanding requests have finished nor + *   tries to abort these. It is the responsibility of the reconnect() + *   function to finish outstanding commands before reconnecting to the target + *   port. + * - It is the responsibility of the caller to ensure that the resources + *   reallocated by the reconnect() function won't be used while this function + *   is in progress. 
One possible strategy is to invoke this function from + *   the context of the SCSI EH thread only. Another possible strategy is to + *   lock the rport mutex inside each SCSI LLD callback that can be invoked by + *   the SCSI EH (the scsi_host_template.eh_*() functions and also the + *   scsi_host_template.queuecommand() function). + */ +int srp_reconnect_rport(struct srp_rport *rport) +{ +	struct Scsi_Host *shost = rport_to_shost(rport); +	struct srp_internal *i = to_srp_internal(shost->transportt); +	struct scsi_device *sdev; +	int res; + +	pr_debug("SCSI host %s\n", dev_name(&shost->shost_gendev)); + +	res = mutex_lock_interruptible(&rport->mutex); +	if (res) +		goto out; +	scsi_target_block(&shost->shost_gendev); +	while (scsi_request_fn_active(shost)) +		msleep(20); +	res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; +	pr_debug("%s (state %d): transport.reconnect() returned %d\n", +		 dev_name(&shost->shost_gendev), rport->state, res); +	if (res == 0) { +		cancel_delayed_work(&rport->fast_io_fail_work); +		cancel_delayed_work(&rport->dev_loss_work); + +		rport->failed_reconnects = 0; +		srp_rport_set_state(rport, SRP_RPORT_RUNNING); +		scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); +		/* +		 * If the SCSI error handler has offlined one or more devices, +		 * invoking scsi_target_unblock() won't change the state of +		 * these devices into running so do that explicitly. +		 */ +		spin_lock_irq(shost->host_lock); +		__shost_for_each_device(sdev, shost) +			if (sdev->sdev_state == SDEV_OFFLINE) +				sdev->sdev_state = SDEV_RUNNING; +		spin_unlock_irq(shost->host_lock); +	} else if (rport->state == SRP_RPORT_RUNNING) { +		/* +		 * srp_reconnect_rport() has been invoked with fast_io_fail +		 * and dev_loss off. Mark the port as failed and start the TL +		 * failure timers if these had not yet been started. +		 */ +		__rport_fail_io_fast(rport); +		scsi_target_unblock(&shost->shost_gendev, +				    SDEV_TRANSPORT_OFFLINE); +		__srp_start_tl_fail_timers(rport); +	} else if (rport->state != SRP_RPORT_BLOCKED) { +		scsi_target_unblock(&shost->shost_gendev, +				    SDEV_TRANSPORT_OFFLINE); +	} +	mutex_unlock(&rport->mutex); + +out: +	return res; +} +EXPORT_SYMBOL(srp_reconnect_rport); + +/** + * srp_timed_out() - SRP transport intercept of the SCSI timeout EH + * @scmd: SCSI command. + * + * If a timeout occurs while an rport is in the blocked state, ask the SCSI + * EH to continue waiting (BLK_EH_RESET_TIMER). Otherwise let the SCSI core + * handle the timeout (BLK_EH_NOT_HANDLED). + * + * Note: This function is called from soft-IRQ context and with the request + * queue lock held. + */ +static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd) +{ +	struct scsi_device *sdev = scmd->device; +	struct Scsi_Host *shost = sdev->host; +	struct srp_internal *i = to_srp_internal(shost->transportt); + +	pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev)); +	return i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ? +		BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; +} +  static void srp_rport_release(struct device *dev)  {  	struct srp_rport *rport = dev_to_rport(dev); @@ -185,6 +675,26 @@ static int srp_host_match(struct attribute_container *cont, struct device *dev)  }  /** + * srp_rport_get() - increment rport reference count + * @rport: SRP target port. 
+ */ +void srp_rport_get(struct srp_rport *rport) +{ +	get_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_get); + +/** + * srp_rport_put() - decrement rport reference count + * @rport: SRP target port. + */ +void srp_rport_put(struct srp_rport *rport) +{ +	put_device(&rport->dev); +} +EXPORT_SYMBOL(srp_rport_put); + +/**   * srp_rport_add - add a SRP remote port to the device hierarchy   * @shost:	scsi host the remote port is connected to.   * @ids:	The port id for the remote port. @@ -196,12 +706,15 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,  {  	struct srp_rport *rport;  	struct device *parent = &shost->shost_gendev; +	struct srp_internal *i = to_srp_internal(shost->transportt);  	int id, ret;  	rport = kzalloc(sizeof(*rport), GFP_KERNEL);  	if (!rport)  		return ERR_PTR(-ENOMEM); +	mutex_init(&rport->mutex); +  	device_initialize(&rport->dev);  	rport->dev.parent = get_device(parent); @@ -210,6 +723,17 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,  	memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));  	rport->roles = ids->roles; +	if (i->f->reconnect) +		rport->reconnect_delay = i->f->reconnect_delay ? +			*i->f->reconnect_delay : 10; +	INIT_DELAYED_WORK(&rport->reconnect_work, srp_reconnect_work); +	rport->fast_io_fail_tmo = i->f->fast_io_fail_tmo ? +		*i->f->fast_io_fail_tmo : 15; +	rport->dev_loss_tmo = i->f->dev_loss_tmo ? *i->f->dev_loss_tmo : 60; +	INIT_DELAYED_WORK(&rport->fast_io_fail_work, +			  rport_fast_io_fail_timedout); +	INIT_DELAYED_WORK(&rport->dev_loss_work, rport_dev_loss_timedout); +  	id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);  	dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id); @@ -259,6 +783,7 @@ void srp_rport_del(struct srp_rport *rport)  	transport_remove_device(dev);  	device_del(dev);  	transport_destroy_device(dev); +  	put_device(dev);  }  EXPORT_SYMBOL_GPL(srp_rport_del); @@ -283,6 +808,28 @@ void srp_remove_host(struct Scsi_Host *shost)  }  EXPORT_SYMBOL_GPL(srp_remove_host); +/** + * srp_stop_rport_timers - stop the transport layer recovery timers + * @rport: SRP remote port for which to stop the timers. + * + * Must be called after srp_remove_host() and scsi_remove_host(). The caller + * must hold a reference on the rport (rport->dev) and on the SCSI host + * (rport->dev.parent). 
+ */ +void srp_stop_rport_timers(struct srp_rport *rport) +{ +	mutex_lock(&rport->mutex); +	if (rport->state == SRP_RPORT_BLOCKED) +		__rport_fail_io_fast(rport); +	srp_rport_set_state(rport, SRP_RPORT_LOST); +	mutex_unlock(&rport->mutex); + +	cancel_delayed_work_sync(&rport->reconnect_work); +	cancel_delayed_work_sync(&rport->fast_io_fail_work); +	cancel_delayed_work_sync(&rport->dev_loss_work); +} +EXPORT_SYMBOL_GPL(srp_stop_rport_timers); +  static int srp_tsk_mgmt_response(struct Scsi_Host *shost, u64 nexus, u64 tm_id,  				 int result)  { @@ -310,6 +857,8 @@ srp_attach_transport(struct srp_function_template *ft)  	if (!i)  		return NULL; +	i->t.eh_timed_out = srp_timed_out; +  	i->t.tsk_mgmt_response = srp_tsk_mgmt_response;  	i->t.it_nexus_response = srp_it_nexus_response; @@ -327,6 +876,15 @@ srp_attach_transport(struct srp_function_template *ft)  	count = 0;  	i->rport_attrs[count++] = &dev_attr_port_id;  	i->rport_attrs[count++] = &dev_attr_roles; +	if (ft->has_rport_state) { +		i->rport_attrs[count++] = &dev_attr_state; +		i->rport_attrs[count++] = &dev_attr_fast_io_fail_tmo; +		i->rport_attrs[count++] = &dev_attr_dev_loss_tmo; +	} +	if (ft->reconnect) { +		i->rport_attrs[count++] = &dev_attr_reconnect_delay; +		i->rport_attrs[count++] = &dev_attr_failed_reconnects; +	}  	if (ft->rport_delete)  		i->rport_attrs[count++] = &dev_attr_delete;  	i->rport_attrs[count++] = NULL; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index e62d17d41d4..6825eda1114 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -105,11 +105,14 @@ static void sd_unlock_native_capacity(struct gendisk *disk);  static int  sd_probe(struct device *);  static int  sd_remove(struct device *);  static void sd_shutdown(struct device *); -static int sd_suspend(struct device *); +static int sd_suspend_system(struct device *); +static int sd_suspend_runtime(struct device *);  static int sd_resume(struct device *);  static void sd_rescan(struct device *); +static int sd_init_command(struct scsi_cmnd *SCpnt); +static void sd_uninit_command(struct scsi_cmnd *SCpnt);  static int sd_done(struct scsi_cmnd *); -static int sd_eh_action(struct scsi_cmnd *, unsigned char *, int, int); +static int sd_eh_action(struct scsi_cmnd *, int);  static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);  static void scsi_disk_release(struct device *cdev);  static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); @@ -484,11 +487,11 @@ static struct class sd_disk_class = {  };  static const struct dev_pm_ops sd_pm_ops = { -	.suspend		= sd_suspend, +	.suspend		= sd_suspend_system,  	.resume			= sd_resume, -	.poweroff		= sd_suspend, +	.poweroff		= sd_suspend_system,  	.restore		= sd_resume, -	.runtime_suspend	= sd_suspend, +	.runtime_suspend	= sd_suspend_runtime,  	.runtime_resume		= sd_resume,  }; @@ -502,6 +505,8 @@ static struct scsi_driver sd_template = {  		.pm		= &sd_pm_ops,  	},  	.rescan			= sd_rescan, +	.init_command		= sd_init_command, +	.uninit_command		= sd_uninit_command,  	.done			= sd_done,  	.eh_action		= sd_eh_action,  }; @@ -736,16 +741,14 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)  		goto out;  	} +	rq->completion_data = page;  	blk_add_request_payload(rq, page, len);  	ret = scsi_setup_blk_pc_cmnd(sdp, rq); -	rq->buffer = page_address(page);  	rq->__data_len = nr_bytes;  out: -	if (ret != BLKPREP_OK) { +	if (ret != BLKPREP_OK)  		__free_page(page); -		rq->buffer = NULL; -	}  	return ret;  } @@ -800,7 +803,7 @@ static int 
sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)  	if (sdkp->device->no_write_same)  		return BLKPREP_KILL; -	BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); +	BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);  	sector >>= ilog2(sdp->sector_size) - 9;  	nr_sectors >>= ilog2(sdp->sector_size) - 9; @@ -829,7 +832,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)  static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)  { -	rq->timeout = SD_FLUSH_TIMEOUT; +	rq->timeout *= SD_FLUSH_TIMEOUT_MULTIPLIER;  	rq->retries = SD_MAX_RETRIES;  	rq->cmd[0] = SYNCHRONIZE_CACHE;  	rq->cmd_len = 10; @@ -837,14 +840,13 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)  	return scsi_setup_blk_pc_cmnd(sdp, rq);  } -static void sd_unprep_fn(struct request_queue *q, struct request *rq) +static void sd_uninit_command(struct scsi_cmnd *SCpnt)  { -	struct scsi_cmnd *SCpnt = rq->special; +	struct request *rq = SCpnt->request; + +	if (rq->cmd_flags & REQ_DISCARD) +		__free_page(rq->completion_data); -	if (rq->cmd_flags & REQ_DISCARD) { -		free_page((unsigned long)rq->buffer); -		rq->buffer = NULL; -	}  	if (SCpnt->cmnd != rq->cmd) {  		mempool_free(SCpnt->cmnd, sd_cdb_pool);  		SCpnt->cmnd = NULL; @@ -852,18 +854,10 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)  	}  } -/** - *	sd_prep_fn - build a scsi (read or write) command from - *	information in the request structure. - *	@SCpnt: pointer to mid-level's per scsi command structure that - *	contains request and into which the scsi command is written - * - *	Returns 1 if successful and 0 if error (or cannot be done now). - **/ -static int sd_prep_fn(struct request_queue *q, struct request *rq) +static int sd_init_command(struct scsi_cmnd *SCpnt)  { -	struct scsi_cmnd *SCpnt; -	struct scsi_device *sdp = q->queuedata; +	struct request *rq = SCpnt->request; +	struct scsi_device *sdp = SCpnt->device;  	struct gendisk *disk = rq->rq_disk;  	struct scsi_disk *sdkp;  	sector_t block = blk_rq_pos(rq); @@ -885,12 +879,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)  	} else if (rq->cmd_flags & REQ_FLUSH) {  		ret = scsi_setup_flush_cmnd(sdp, rq);  		goto out; -	} else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { -		ret = scsi_setup_blk_pc_cmnd(sdp, rq); -		goto out; -	} else if (rq->cmd_type != REQ_TYPE_FS) { -		ret = BLKPREP_KILL; -		goto out;  	}  	ret = scsi_setup_fs_cmnd(sdp, rq);  	if (ret != BLKPREP_OK) @@ -902,11 +890,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)  	 * is used for a killable error condition */  	ret = BLKPREP_KILL; -	SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt, -					"sd_prep_fn: block=%llu, " -					"count=%d\n", -					(unsigned long long)block, -					this_count)); +	SCSI_LOG_HLQUEUE(1, +		scmd_printk(KERN_INFO, SCpnt, +			"%s: block=%llu, count=%d\n", +			__func__, (unsigned long long)block, this_count));  	if (!sdp || !scsi_device_online(sdp) ||  	    block + blk_rq_sectors(rq) > get_capacity(disk)) { @@ -1002,7 +989,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)  		SCpnt->cmnd[0] = READ_6;  		SCpnt->sc_data_direction = DMA_FROM_DEVICE;  	} else { -		scmd_printk(KERN_ERR, SCpnt, "Unknown command %x\n", rq->cmd_flags); +		scmd_printk(KERN_ERR, SCpnt, "Unknown command %llx\n", (unsigned long long) rq->cmd_flags);  		goto out;  	} @@ -1126,7 +1113,7 @@ static int sd_prep_fn(struct request_queue *q, 
struct request *rq)  	 */  	ret = BLKPREP_OK;   out: -	return scsi_prep_return(q, rq, ret); +	return ret;  }  /** @@ -1433,12 +1420,13 @@ static int sd_sync_cache(struct scsi_disk *sdkp)  {  	int retries, res;  	struct scsi_device *sdp = sdkp->device; +	const int timeout = sdp->request_queue->rq_timeout +		* SD_FLUSH_TIMEOUT_MULTIPLIER;  	struct scsi_sense_hdr sshdr;  	if (!scsi_device_online(sdp))  		return -ENODEV; -  	for (retries = 3; retries > 0; --retries) {  		unsigned char cmd[10] = { 0 }; @@ -1448,20 +1436,39 @@ static int sd_sync_cache(struct scsi_disk *sdkp)  		 * flush everything.  		 */  		res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, -					     &sshdr, SD_FLUSH_TIMEOUT, -					     SD_MAX_RETRIES, NULL, REQ_PM); +					     &sshdr, timeout, SD_MAX_RETRIES, +					     NULL, REQ_PM);  		if (res == 0)  			break;  	}  	if (res) {  		sd_print_result(sdkp, res); +  		if (driver_byte(res) & DRIVER_SENSE)  			sd_print_sense_hdr(sdkp, &sshdr); +		/* we need to evaluate the error return  */ +		if (scsi_sense_valid(&sshdr) && +			(sshdr.asc == 0x3a ||	/* medium not present */ +			 sshdr.asc == 0x20))	/* invalid command */ +				/* this is no error here */ +				return 0; + +		switch (host_byte(res)) { +		/* ignore errors due to racing a disconnection */ +		case DID_BAD_TARGET: +		case DID_NO_CONNECT: +			return 0; +		/* signal the upper layer it might try again */ +		case DID_BUS_BUSY: +		case DID_IMM_RETRY: +		case DID_REQUEUE: +		case DID_SOFT_ERROR: +			return -EBUSY; +		default: +			return -EIO; +		}  	} - -	if (res) -		return -EIO;  	return 0;  } @@ -1530,23 +1537,23 @@ static const struct block_device_operations sd_fops = {  /**   *	sd_eh_action - error handling callback   *	@scmd:		sd-issued command that has failed - *	@eh_cmnd:	The command that was sent during error handling - *	@eh_cmnd_len:	Length of eh_cmnd in bytes   *	@eh_disp:	The recovery disposition suggested by the midlayer   * - *	This function is called by the SCSI midlayer upon completion of - *	an error handling command (TEST UNIT READY, START STOP UNIT, - *	etc.) The command sent to the device by the error handler is - *	stored in eh_cmnd. The result of sending the eh command is - *	passed in eh_disp. + *	This function is called by the SCSI midlayer upon completion of an + *	error test command (currently TEST UNIT READY). The result of sending + *	the eh command is passed in eh_disp.  We're looking for devices that + *	fail medium access commands but are OK with non access commands like + *	test unit ready (so wrongly see the device as having a successful + *	recovery)   **/ -static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd, -			int eh_cmnd_len, int eh_disp) +static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)  {  	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);  	if (!scsi_device_online(scmd->device) || -	    !scsi_medium_access_command(scmd)) +	    !scsi_medium_access_command(scmd) || +	    host_byte(scmd->result) != DID_TIME_OUT || +	    eh_disp != SUCCESS)  		return eh_disp;  	/* @@ -1556,9 +1563,7 @@ static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,  	 * process of recovering or has it suffered an internal failure  	 * that prevents access to the storage medium.  	 
*/ -	if (host_byte(scmd->result) == DID_TIME_OUT && eh_disp == SUCCESS && -	    eh_cmnd_len && eh_cmnd[0] == TEST_UNIT_READY) -		sdkp->medium_access_timed_out++; +	sdkp->medium_access_timed_out++;  	/*  	 * If the device keeps failing read/write commands but TEST UNIT @@ -1607,7 +1612,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)  		end_lba <<= 1;  	} else {  		/* be careful ... don't want any overflows */ -		u64 factor = scmd->device->sector_size / 512; +		unsigned int factor = scmd->device->sector_size / 512;  		do_div(start_lba, factor);  		do_div(end_lba, factor);  	} @@ -1670,12 +1675,12 @@ static int sd_done(struct scsi_cmnd *SCpnt)  						   sshdr.ascq));  	}  #endif +	sdkp->medium_access_timed_out = 0; +  	if (driver_byte(result) != DRIVER_SENSE &&  	    (!sense_valid || sense_deferred))  		goto out; -	sdkp->medium_access_timed_out = 0; -  	switch (sshdr.sense_key) {  	case HARDWARE_ERROR:  	case MEDIUM_ERROR: @@ -2262,7 +2267,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)  	set_disk_ro(sdkp->disk, 0);  	if (sdp->skip_ms_page_3f) { -		sd_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n"); +		sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");  		return;  	} @@ -2294,7 +2299,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)  	}  	if (!scsi_status_is_good(res)) { -		sd_printk(KERN_WARNING, sdkp, +		sd_first_printk(KERN_WARNING, sdkp,  			  "Test WP failed, assume Write Enabled\n");  	} else {  		sdkp->write_prot = ((data.device_specific & 0x80) != 0); @@ -2362,7 +2367,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)  	if (!data.header_length) {  		modepage = 6;  		first_len = 0; -		sd_printk(KERN_ERR, sdkp, "Missing header in MODE_SENSE response\n"); +		sd_first_printk(KERN_ERR, sdkp, +				"Missing header in MODE_SENSE response\n");  	}  	/* that went OK, now ask for the proper length */ @@ -2375,7 +2381,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)  	if (len < 3)  		goto bad_sense;  	else if (len > SD_BUF_SIZE) { -		sd_printk(KERN_NOTICE, sdkp, "Truncating mode parameter " +		sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "  			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);  		len = SD_BUF_SIZE;  	} @@ -2398,8 +2404,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)  				/* We're interested only in the first 3 bytes.  				 
*/  				if (len - offset <= 2) { -					sd_printk(KERN_ERR, sdkp, "Incomplete " -						  "mode parameter data\n"); +					sd_first_printk(KERN_ERR, sdkp, +						"Incomplete mode parameter " +							"data\n");  					goto defaults;  				} else {  					modepage = page_code; @@ -2413,14 +2420,15 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)  				else if (!spf && len - offset > 1)  					offset += 2 + buffer[offset+1];  				else { -					sd_printk(KERN_ERR, sdkp, "Incomplete " -						  "mode parameter data\n"); +					sd_first_printk(KERN_ERR, sdkp, +							"Incomplete mode " +							"parameter data\n");  					goto defaults;  				}  			}  		} -		sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); +		sd_first_printk(KERN_ERR, sdkp, "No Caching mode page found\n");  		goto defaults;  	Page_found: @@ -2433,8 +2441,11 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)  		}  		sdkp->DPOFUA = (data.device_specific & 0x10) != 0; -		if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { -			sd_printk(KERN_NOTICE, sdkp, +		if (sdp->broken_fua) { +			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n"); +			sdkp->DPOFUA = 0; +		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { +			sd_first_printk(KERN_NOTICE, sdkp,  				  "Uses READ/WRITE(6), disabling FUA\n");  			sdkp->DPOFUA = 0;  		} @@ -2456,16 +2467,19 @@ bad_sense:  	    sshdr.sense_key == ILLEGAL_REQUEST &&  	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)  		/* Invalid field in CDB */ -		sd_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n"); +		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");  	else -		sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n"); +		sd_first_printk(KERN_ERR, sdkp, +				"Asking for cache data failed\n");  defaults:  	if (sdp->wce_default_on) { -		sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n"); +		sd_first_printk(KERN_NOTICE, sdkp, +				"Assuming drive cache: write back\n");  		sdkp->WCE = 1;  	} else { -		sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n"); +		sd_first_printk(KERN_ERR, sdkp, +				"Assuming drive cache: write through\n");  		sdkp->WCE = 0;  	}  	sdkp->RCD = 0; @@ -2494,7 +2508,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)  	if (!scsi_status_is_good(res) || !data.header_length ||  	    data.length < 6) { -		sd_printk(KERN_WARNING, sdkp, +		sd_first_printk(KERN_WARNING, sdkp,  			  "getting Control mode page failed, assume no ATO\n");  		if (scsi_sense_valid(&sshdr)) @@ -2506,7 +2520,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)  	offset = data.header_length + data.block_descriptor_length;  	if ((buffer[offset] & 0x3f) != 0x0a) { -		sd_printk(KERN_ERR, sdkp, "ATO Got wrong page\n"); +		sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");  		return;  	} @@ -2638,14 +2652,23 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)  {  	struct scsi_device *sdev = sdkp->device; +	if (sdev->host->no_write_same) { +		sdev->no_write_same = 1; + +		return; +	} +  	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) { +		/* too large values might cause issues with arcmsr */ +		int vpd_buf_len = 64; +  		sdev->no_report_opcodes = 1;  		/* Disable WRITE SAME if REPORT SUPPORTED OPERATION  		 * CODES is unsupported and the device has an ATA  		 * Information VPD page (SAT).  		 
*/ -		if (!scsi_get_vpd_page(sdev, 0x89, buffer, SD_BUF_SIZE)) +		if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))  			sdev->no_write_same = 1;  	} @@ -2844,9 +2867,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)  	sd_revalidate_disk(gd); -	blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); -	blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn); -  	gd->driverfs_dev = &sdp->sdev_gendev;  	gd->flags = GENHD_FL_EXT_DEVT;  	if (sdp->removable) { @@ -2854,6 +2874,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)  		gd->events |= DISK_EVENT_MEDIA_CHANGE;  	} +	blk_pm_runtime_init(sdp->request_queue, dev);  	add_disk(gd);  	if (sdkp->capacity)  		sd_dif_config_host(sdkp); @@ -2862,7 +2883,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)  	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",  		  sdp->removable ? "removable " : ""); -	blk_pm_runtime_init(sdp->request_queue, dev);  	scsi_autopm_put_device(sdp);  	put_device(&sdkp->dev);  } @@ -2992,9 +3012,8 @@ static int sd_remove(struct device *dev)  	devt = disk_devt(sdkp->disk);  	scsi_autopm_get_device(sdkp->device); +	async_synchronize_full_domain(&scsi_sd_pm_domain);  	async_synchronize_full_domain(&scsi_sd_probe_domain); -	blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); -	blk_queue_unprep_rq(sdkp->device->request_queue, NULL);  	device_del(&sdkp->dev);  	del_gendisk(sdkp->disk);  	sd_shutdown(dev); @@ -3058,9 +3077,17 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)  		sd_print_result(sdkp, res);  		if (driver_byte(res) & DRIVER_SENSE)  			sd_print_sense_hdr(sdkp, &sshdr); +		if (scsi_sense_valid(&sshdr) && +			/* 0x3a is medium not present */ +			sshdr.asc == 0x3a) +			res = 0;  	} -	return res; +	/* SCSI error codes must not go to the generic layer */ +	if (res) +		return -EIO; + +	return 0;  }  /* @@ -3078,7 +3105,7 @@ static void sd_shutdown(struct device *dev)  	if (pm_runtime_suspended(dev))  		goto exit; -	if (sdkp->WCE) { +	if (sdkp->WCE && sdkp->media_present) {  		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");  		sd_sync_cache(sdkp);  	} @@ -3092,7 +3119,7 @@ exit:  	scsi_disk_put(sdkp);  } -static int sd_suspend(struct device *dev) +static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)  {  	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);  	int ret = 0; @@ -3100,16 +3127,23 @@ static int sd_suspend(struct device *dev)  	if (!sdkp)  		return 0;	/* this can happen */ -	if (sdkp->WCE) { +	if (sdkp->WCE && sdkp->media_present) {  		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");  		ret = sd_sync_cache(sdkp); -		if (ret) +		if (ret) { +			/* ignore OFFLINE device */ +			if (ret == -ENODEV) +				ret = 0;  			goto done; +		}  	}  	if (sdkp->device->manage_start_stop) {  		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n"); +		/* an error is not worth aborting a system sleep */  		ret = sd_start_stop_device(sdkp, 0); +		if (ignore_stop_errors) +			ret = 0;  	}  done: @@ -3117,6 +3151,16 @@ done:  	return ret;  } +static int sd_suspend_system(struct device *dev) +{ +	return sd_suspend_common(dev, true); +} + +static int sd_suspend_runtime(struct device *dev) +{ +	return sd_suspend_common(dev, false); +} +  static int sd_resume(struct device *dev)  {  	struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 7a049de2205..620871efbf0 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -13,7 +13,11 @@   */  #define SD_TIMEOUT		(30 * HZ)  
#define SD_MOD_TIMEOUT		(75 * HZ) -#define SD_FLUSH_TIMEOUT	(60 * HZ) +/* + * Flush timeout is a multiplier over the standard device timeout which is + * user modifiable via sysfs but initially set to SD_TIMEOUT + */ +#define SD_FLUSH_TIMEOUT_MULTIPLIER	2  #define SD_WRITE_SAME_TIMEOUT	(120 * HZ)  /* @@ -100,6 +104,12 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)  		    (sdsk)->disk->disk_name, ##a) :			\  	sdev_printk(prefix, (sdsk)->device, fmt, ##a) +#define sd_first_printk(prefix, sdsk, fmt, a...)			\ +	do {								\ +		if ((sdkp)->first_scan)					\ +			sd_printk(prefix, sdsk, fmt, ##a);		\ +	} while (0) +  static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)  {  	switch (scmd->cmnd[0]) { diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 6174ca4ea27..a7a691d0af7 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,  	struct bio *bio;  	struct scsi_disk *sdkp;  	struct sd_dif_tuple *sdt; -	unsigned int i, j;  	u32 phys, virt;  	sdkp = rq->bio->bi_bdev->bd_disk->private_data; @@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,  	phys = hw_sector & 0xffffffff;  	__rq_for_each_bio(bio, rq) { -		struct bio_vec *iv; +		struct bio_vec iv; +		struct bvec_iter iter; +		unsigned int j;  		/* Already remapped? */  		if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))  			break; -		virt = bio->bi_integrity->bip_sector & 0xffffffff; +		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff; -		bip_for_each_vec(iv, bio->bi_integrity, i) { -			sdt = kmap_atomic(iv->bv_page) -				+ iv->bv_offset; +		bip_for_each_vec(iv, bio->bi_integrity, iter) { +			sdt = kmap_atomic(iv.bv_page) +				+ iv.bv_offset; -			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { +			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {  				if (be32_to_cpu(sdt->ref_tag) == virt)  					sdt->ref_tag = cpu_to_be32(phys); @@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)  	struct scsi_disk *sdkp;  	struct bio *bio;  	struct sd_dif_tuple *sdt; -	unsigned int i, j, sectors, sector_sz; +	unsigned int j, sectors, sector_sz;  	u32 phys, virt;  	sdkp = scsi_disk(scmd->request->rq_disk); @@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)  		phys >>= 3;  	__rq_for_each_bio(bio, scmd->request) { -		struct bio_vec *iv; +		struct bio_vec iv; +		struct bvec_iter iter; -		virt = bio->bi_integrity->bip_sector & 0xffffffff; +		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff; -		bip_for_each_vec(iv, bio->bi_integrity, i) { -			sdt = kmap_atomic(iv->bv_page) -				+ iv->bv_offset; +		bip_for_each_vec(iv, bio->bi_integrity, iter) { +			sdt = kmap_atomic(iv.bv_page) +				+ iv.bv_offset; -			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { +			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {  				if (sectors == 0) {  					kunmap_atomic(sdt); diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index eba183c428c..80bfece1a2d 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -25,6 +25,7 @@  #include <linux/module.h>  #include <linux/kernel.h>  #include <linux/enclosure.h> +#include <asm/unaligned.h>  #include <scsi/scsi.h>  #include <scsi/scsi_cmnd.h> @@ -448,27 +449,18 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,  static void ses_match_to_enclosure(struct enclosure_device *edev,  				   struct scsi_device *sdev)  { -	unsigned char *buf;  	unsigned char 
*desc; -	unsigned int vpd_len;  	struct efd efd = {  		.addr = 0,  	}; -	buf = kmalloc(INIT_ALLOC_SIZE, GFP_KERNEL); -	if (!buf || scsi_get_vpd_page(sdev, 0x83, buf, INIT_ALLOC_SIZE)) -		goto free; -  	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0); -	vpd_len = ((buf[2] << 8) | buf[3]) + 4; -	kfree(buf); -	buf = kmalloc(vpd_len, GFP_KERNEL); -	if (!buf ||scsi_get_vpd_page(sdev, 0x83, buf, vpd_len)) -		goto free; +	if (!sdev->vpd_pg83_len) +		return; -	desc = buf + 4; -	while (desc < buf + vpd_len) { +	desc = sdev->vpd_pg83 + 4; +	while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) {  		enum scsi_protocol proto = desc[0] >> 4;  		u8 code_set = desc[0] & 0x0f;  		u8 piv = desc[1] & 0x80; @@ -478,25 +470,15 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,  		if (piv && code_set == 1 && assoc == 1  		    && proto == SCSI_PROTOCOL_SAS && type == 3 && len == 8) -			efd.addr = (u64)desc[4] << 56 | -				(u64)desc[5] << 48 | -				(u64)desc[6] << 40 | -				(u64)desc[7] << 32 | -				(u64)desc[8] << 24 | -				(u64)desc[9] << 16 | -				(u64)desc[10] << 8 | -				(u64)desc[11]; +			efd.addr = get_unaligned_be64(&desc[4]);  		desc += len + 4;  	} -	if (!efd.addr) -		goto free; +	if (efd.addr) { +		efd.dev = &sdev->sdev_gendev; -	efd.dev = &sdev->sdev_gendev; - -	enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); - free: -	kfree(buf); +		enclosure_for_each_device(ses_enclosure_find_by_addr, &efd); +	}  }  static int ses_intf_add(struct device *cdev, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 5cbc4bb1b39..53268aaba55 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -105,8 +105,11 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;  static int sg_add(struct device *, struct class_interface *);  static void sg_remove(struct device *, struct class_interface *); +static DEFINE_SPINLOCK(sg_open_exclusive_lock); +  static DEFINE_IDR(sg_index_idr); -static DEFINE_RWLOCK(sg_index_lock); +static DEFINE_RWLOCK(sg_index_lock);	/* Also used to lock +							   file descriptor list for device */  static struct class_interface sg_interface = {  	.add_dev	= sg_add, @@ -143,7 +146,8 @@ typedef struct sg_request {	/* SG_MAX_QUEUE requests outstanding per file */  } Sg_request;  typedef struct sg_fd {		/* holds the state of a file descriptor */ -	struct list_head sfd_siblings; /* protected by sfd_lock of device */ +	/* sfd_siblings is protected by sg_index_lock */ +	struct list_head sfd_siblings;  	struct sg_device *parentdp;	/* owning device */  	wait_queue_head_t read_wait;	/* queue read until command done */  	rwlock_t rq_list_lock;	/* protect access to list in req_arr */ @@ -166,12 +170,13 @@ typedef struct sg_fd {		/* holds the state of a file descriptor */  typedef struct sg_device { /* holds the state of each scsi generic device */  	struct scsi_device *device; +	wait_queue_head_t o_excl_wait;	/* queue open() when O_EXCL in use */  	int sg_tablesize;	/* adapter's max scatter-gather table size */  	u32 index;		/* device index number */ -	spinlock_t sfd_lock;	/* protect file descriptor list for device */ +	/* sfds is protected by sg_index_lock */  	struct list_head sfds; -	struct rw_semaphore o_sem;	/* exclude open should hold this rwsem */  	volatile char detached;	/* 0->attached, 1->detached pending removal */ +	/* exclude protected by sg_open_exclusive_lock */  	char exclude;		/* opened for exclusive access */  	char sgdebug;		/* 0->off, 1->sense, 9->dump dev, 10-> all devs */  	struct gendisk *disk; @@ -220,14 +225,35 @@ static int 
sg_allow_access(struct file *filp, unsigned char *cmd)  	return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);  } +static int get_exclude(Sg_device *sdp) +{ +	unsigned long flags; +	int ret; + +	spin_lock_irqsave(&sg_open_exclusive_lock, flags); +	ret = sdp->exclude; +	spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); +	return ret; +} + +static int set_exclude(Sg_device *sdp, char val) +{ +	unsigned long flags; + +	spin_lock_irqsave(&sg_open_exclusive_lock, flags); +	sdp->exclude = val; +	spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); +	return val; +} +  static int sfds_list_empty(Sg_device *sdp)  {  	unsigned long flags;  	int ret; -	spin_lock_irqsave(&sdp->sfd_lock, flags); +	read_lock_irqsave(&sg_index_lock, flags);  	ret = list_empty(&sdp->sfds); -	spin_unlock_irqrestore(&sdp->sfd_lock, flags); +	read_unlock_irqrestore(&sg_index_lock, flags);  	return ret;  } @@ -239,6 +265,7 @@ sg_open(struct inode *inode, struct file *filp)  	struct request_queue *q;  	Sg_device *sdp;  	Sg_fd *sfp; +	int res;  	int retval;  	nonseekable_open(inode, filp); @@ -267,52 +294,54 @@ sg_open(struct inode *inode, struct file *filp)  		goto error_out;  	} -	if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) { -		retval = -EPERM; /* Can't lock it with read only access */ -		goto error_out; -	} -	if (flags & O_NONBLOCK) { -		if (flags & O_EXCL) { -			if (!down_write_trylock(&sdp->o_sem)) { -				retval = -EBUSY; -				goto error_out; -			} -		} else { -			if (!down_read_trylock(&sdp->o_sem)) { -				retval = -EBUSY; -				goto error_out; -			} +	if (flags & O_EXCL) { +		if (O_RDONLY == (flags & O_ACCMODE)) { +			retval = -EPERM; /* Can't lock it with read only access */ +			goto error_out; +		} +		if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { +			retval = -EBUSY; +			goto error_out; +		} +		res = wait_event_interruptible(sdp->o_excl_wait, +					   ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 
0 : set_exclude(sdp, 1))); +		if (res) { +			retval = res;	/* -ERESTARTSYS because signal hit process */ +			goto error_out; +		} +	} else if (get_exclude(sdp)) {	/* some other fd has an exclusive lock on dev */ +		if (flags & O_NONBLOCK) { +			retval = -EBUSY; +			goto error_out; +		} +		res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp)); +		if (res) { +			retval = res;	/* -ERESTARTSYS because signal hit process */ +			goto error_out;  		} -	} else { -		if (flags & O_EXCL) -			down_write(&sdp->o_sem); -		else -			down_read(&sdp->o_sem);  	} -	/* Since write lock is held, no need to check sfd_list */ -	if (flags & O_EXCL) -		sdp->exclude = 1;	/* used by release lock */ - +	if (sdp->detached) { +		retval = -ENODEV; +		goto error_out; +	}  	if (sfds_list_empty(sdp)) {	/* no existing opens on this device */  		sdp->sgdebug = 0;  		q = sdp->device->request_queue;  		sdp->sg_tablesize = queue_max_segments(q);  	} -	sfp = sg_add_sfp(sdp, dev); -	if (!IS_ERR(sfp)) +	if ((sfp = sg_add_sfp(sdp, dev)))  		filp->private_data = sfp; -		/* retval is already provably zero at this point because of the -		 * check after retval = scsi_autopm_get_device(sdp->device)) -		 */  	else { -		retval = PTR_ERR(sfp); -  		if (flags & O_EXCL) { -			sdp->exclude = 0;	/* undo if error */ -			up_write(&sdp->o_sem); -		} else -			up_read(&sdp->o_sem); +			set_exclude(sdp, 0);	/* undo if error */ +			wake_up_interruptible(&sdp->o_excl_wait); +		} +		retval = -ENOMEM; +		goto error_out; +	} +	retval = 0;  error_out: +	if (retval) {  		scsi_autopm_put_device(sdp->device);  sdp_put:  		scsi_device_put(sdp->device); @@ -329,18 +358,13 @@ sg_release(struct inode *inode, struct file *filp)  {  	Sg_device *sdp;  	Sg_fd *sfp; -	int excl;  	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))  		return -ENXIO;  	SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); -	excl = sdp->exclude; -	sdp->exclude = 0; -	if (excl) -		up_write(&sdp->o_sem); -	else -		up_read(&sdp->o_sem); +	set_exclude(sdp, 0); +	wake_up_interruptible(&sdp->o_excl_wait);  	scsi_autopm_put_device(sdp->device);  	kref_put(&sfp->f_ref, sg_remove_sfp); @@ -1391,9 +1415,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)  	disk->first_minor = k;  	sdp->disk = disk;  	sdp->device = scsidp; -	spin_lock_init(&sdp->sfd_lock);  	INIT_LIST_HEAD(&sdp->sfds); -	init_rwsem(&sdp->o_sem); +	init_waitqueue_head(&sdp->o_excl_wait);  	sdp->sg_tablesize = queue_max_segments(q);  	sdp->index = k;  	kref_init(&sdp->d_ref); @@ -1526,13 +1549,11 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)  	/* Need a write lock to set sdp->detached. 
*/  	write_lock_irqsave(&sg_index_lock, iflags); -	spin_lock(&sdp->sfd_lock);  	sdp->detached = 1;  	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {  		wake_up_interruptible(&sfp->read_wait);  		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);  	} -	spin_unlock(&sdp->sfd_lock);  	write_unlock_irqrestore(&sg_index_lock, iflags);  	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); @@ -1632,10 +1653,9 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd)  	if (!rq)  		return -ENOMEM; +	blk_rq_set_block_pc(rq);  	memcpy(rq->cmd, cmd, hp->cmd_len); -  	rq->cmd_len = hp->cmd_len; -	rq->cmd_type = REQ_TYPE_BLOCK_PC;  	srp->rq = rq;  	rq->end_io_data = srp; @@ -2043,7 +2063,7 @@ sg_add_sfp(Sg_device * sdp, int dev)  	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);  	if (!sfp) -		return ERR_PTR(-ENOMEM); +		return NULL;  	init_waitqueue_head(&sfp->read_wait);  	rwlock_init(&sfp->rq_list_lock); @@ -2057,13 +2077,9 @@ sg_add_sfp(Sg_device * sdp, int dev)  	sfp->cmd_q = SG_DEF_COMMAND_Q;  	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;  	sfp->parentdp = sdp; -	spin_lock_irqsave(&sdp->sfd_lock, iflags); -	if (sdp->detached) { -		spin_unlock_irqrestore(&sdp->sfd_lock, iflags); -		return ERR_PTR(-ENODEV); -	} +	write_lock_irqsave(&sg_index_lock, iflags);  	list_add_tail(&sfp->sfd_siblings, &sdp->sfds); -	spin_unlock_irqrestore(&sdp->sfd_lock, iflags); +	write_unlock_irqrestore(&sg_index_lock, iflags);  	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));  	if (unlikely(sg_big_buff != def_reserved_size))  		sg_big_buff = def_reserved_size; @@ -2113,9 +2129,10 @@ static void sg_remove_sfp(struct kref *kref)  	struct sg_device *sdp = sfp->parentdp;  	unsigned long iflags; -	spin_lock_irqsave(&sdp->sfd_lock, iflags); +	write_lock_irqsave(&sg_index_lock, iflags);  	list_del(&sfp->sfd_siblings); -	spin_unlock_irqrestore(&sdp->sfd_lock, iflags); +	write_unlock_irqrestore(&sg_index_lock, iflags); +	wake_up_interruptible(&sdp->o_excl_wait);  	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);  	schedule_work(&sfp->ew.work); @@ -2502,7 +2519,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)  	return 0;  } -/* must be called while holding sg_index_lock and sfd_lock */ +/* must be called while holding sg_index_lock */  static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)  {  	int k, m, new_interface, blen, usg; @@ -2587,26 +2604,22 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)  	read_lock_irqsave(&sg_index_lock, iflags);  	sdp = it ? 
sg_lookup_dev(it->index) : NULL; -	if (sdp) { -		spin_lock(&sdp->sfd_lock); -		if (!list_empty(&sdp->sfds)) { -			struct scsi_device *scsidp = sdp->device; +	if (sdp && !list_empty(&sdp->sfds)) { +		struct scsi_device *scsidp = sdp->device; -			seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); -			if (sdp->detached) -				seq_printf(s, "detached pending close "); -			else -				seq_printf -				    (s, "scsi%d chan=%d id=%d lun=%d   em=%d", -				     scsidp->host->host_no, -				     scsidp->channel, scsidp->id, -				     scsidp->lun, -				     scsidp->host->hostt->emulated); -			seq_printf(s, " sg_tablesize=%d excl=%d\n", -				   sdp->sg_tablesize, sdp->exclude); -			sg_proc_debug_helper(s, sdp); -		} -		spin_unlock(&sdp->sfd_lock); +		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); +		if (sdp->detached) +			seq_printf(s, "detached pending close "); +		else +			seq_printf +			    (s, "scsi%d chan=%d id=%d lun=%d   em=%d", +			     scsidp->host->host_no, +			     scsidp->channel, scsidp->id, +			     scsidp->lun, +			     scsidp->host->hostt->emulated); +		seq_printf(s, " sg_tablesize=%d excl=%d\n", +			   sdp->sg_tablesize, get_exclude(sdp)); +		sg_proc_debug_helper(s, sdp);  	}  	read_unlock_irqrestore(&sg_index_lock, iflags);  	return 0; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 119d67f9c47..93cbd36c990 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -79,6 +79,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);  static DEFINE_MUTEX(sr_mutex);  static int sr_probe(struct device *);  static int sr_remove(struct device *); +static int sr_init_command(struct scsi_cmnd *SCpnt);  static int sr_done(struct scsi_cmnd *);  static int sr_runtime_suspend(struct device *dev); @@ -94,6 +95,7 @@ static struct scsi_driver sr_template = {  		.remove		= sr_remove,  		.pm		= &sr_pm_ops,  	}, +	.init_command		= sr_init_command,  	.done			= sr_done,  }; @@ -161,14 +163,10 @@ static inline struct scsi_cd *scsi_cd_get(struct gendisk *disk)  		goto out;  	cd = scsi_cd(disk);  	kref_get(&cd->kref); -	if (scsi_device_get(cd->device)) -		goto out_put; -	if (!scsi_autopm_get_device(cd->device)) -		goto out; - - out_put: -	kref_put(&cd->kref, sr_kref_release); -	cd = NULL; +	if (scsi_device_get(cd->device)) { +		kref_put(&cd->kref, sr_kref_release); +		cd = NULL; +	}   out:  	mutex_unlock(&sr_ref_mutex);  	return cd; @@ -180,7 +178,6 @@ static void scsi_cd_put(struct scsi_cd *cd)  	mutex_lock(&sr_ref_mutex);  	kref_put(&cd->kref, sr_kref_release); -	scsi_autopm_put_device(sdev);  	scsi_device_put(sdev);  	mutex_unlock(&sr_ref_mutex);  } @@ -383,21 +380,14 @@ static int sr_done(struct scsi_cmnd *SCpnt)  	return good_bytes;  } -static int sr_prep_fn(struct request_queue *q, struct request *rq) +static int sr_init_command(struct scsi_cmnd *SCpnt)  {  	int block = 0, this_count, s_size;  	struct scsi_cd *cd; -	struct scsi_cmnd *SCpnt; -	struct scsi_device *sdp = q->queuedata; +	struct request *rq = SCpnt->request; +	struct scsi_device *sdp = SCpnt->device;  	int ret; -	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { -		ret = scsi_setup_blk_pc_cmnd(sdp, rq); -		goto out; -	} else if (rq->cmd_type != REQ_TYPE_FS) { -		ret = BLKPREP_KILL; -		goto out; -	}  	ret = scsi_setup_fs_cmnd(sdp, rq);  	if (ret != BLKPREP_OK)  		goto out; @@ -522,7 +512,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)  	 */  	ret = BLKPREP_OK;   out: -	return scsi_prep_return(q, rq, ret); +	return ret;  }  static int sr_block_open(struct block_device *bdev, fmode_t mode) @@ -558,8 +548,6 @@ static int 
sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,  	void __user *argp = (void __user *)arg;  	int ret; -	scsi_autopm_get_device(cd->device); -  	mutex_lock(&sr_mutex);  	/* @@ -591,7 +579,6 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,  out:  	mutex_unlock(&sr_mutex); -	scsi_autopm_put_device(cd->device);  	return ret;  } @@ -599,17 +586,11 @@ static unsigned int sr_block_check_events(struct gendisk *disk,  					  unsigned int clearing)  {  	struct scsi_cd *cd = scsi_cd(disk); -	unsigned int ret; -	if (atomic_read(&cd->device->disk_events_disable_depth) == 0) { -		scsi_autopm_get_device(cd->device); -		ret = cdrom_check_events(&cd->cdi, clearing); -		scsi_autopm_put_device(cd->device); -	} else { -		ret = 0; -	} +	if (atomic_read(&cd->device->disk_events_disable_depth)) +		return 0; -	return ret; +	return cdrom_check_events(&cd->cdi, clearing);  }  static int sr_block_revalidate_disk(struct gendisk *disk) @@ -617,8 +598,6 @@ static int sr_block_revalidate_disk(struct gendisk *disk)  	struct scsi_cd *cd = scsi_cd(disk);  	struct scsi_sense_hdr sshdr; -	scsi_autopm_get_device(cd->device); -  	/* if the unit is not ready, nothing more to do */  	if (scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr))  		goto out; @@ -626,7 +605,6 @@ static int sr_block_revalidate_disk(struct gendisk *disk)  	sr_cd_check(&cd->cdi);  	get_sectorsize(cd);  out: -	scsi_autopm_put_device(cd->device);  	return 0;  } @@ -735,7 +713,6 @@ static int sr_probe(struct device *dev)  	/* FIXME: need to handle a get_capabilities failure properly ?? */  	get_capabilities(cd); -	blk_queue_prep_rq(sdev->request_queue, sr_prep_fn);  	sr_vendor_init(cd);  	disk->driverfs_dev = &sdev->sdev_gendev; @@ -747,6 +724,12 @@ static int sr_probe(struct device *dev)  	if (register_cdrom(&cd->cdi))  		goto fail_put; +	/* +	 * Initialize block layer runtime PM stuffs before the +	 * periodic event checking request gets started in add_disk. 
+	 */ +	blk_pm_runtime_init(sdev->request_queue, dev); +  	dev_set_drvdata(dev, cd);  	disk->flags |= GENHD_FL_REMOVABLE;  	add_disk(disk); @@ -1004,7 +987,6 @@ static int sr_remove(struct device *dev)  	scsi_autopm_get_device(cd->device); -	blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);  	del_gendisk(cd->disk);  	mutex_lock(&sr_ref_mutex); diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index ff44b3c2cff..14eb4b256a0 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -484,7 +484,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,  	if (!req)  		return DRIVER_ERROR << 24; -	req->cmd_type = REQ_TYPE_BLOCK_PC; +	blk_rq_set_block_pc(req);  	req->cmd_flags |= REQ_QUIET;  	mdata->null_mapped = 1; @@ -2198,12 +2198,19 @@ static int st_set_options(struct scsi_tape *STp, long options)  	struct st_modedef *STm;  	char *name = tape_name(STp);  	struct cdev *cd0, *cd1; +	struct device *d0, *d1;  	STm = &(STp->modes[STp->current_mode]);  	if (!STm->defined) { -		cd0 = STm->cdevs[0]; cd1 = STm->cdevs[1]; +		cd0 = STm->cdevs[0]; +		cd1 = STm->cdevs[1]; +		d0  = STm->devs[0]; +		d1  = STm->devs[1];  		memcpy(STm, &(STp->modes[0]), sizeof(struct st_modedef)); -		STm->cdevs[0] = cd0; STm->cdevs[1] = cd1; +		STm->cdevs[0] = cd0; +		STm->cdevs[1] = cd1; +		STm->devs[0]  = d0; +		STm->devs[1]  = d1;  		modes_defined = 1;                  DEBC(printk(ST_DEB_MSG                              "%s: Initialized mode %d definition from mode 0\n", @@ -3719,7 +3726,7 @@ static struct st_buffer *new_tape_buffer(int need_dma, int max_sg)  static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dma)  { -	int segs, nbr, max_segs, b_size, order, got; +	int segs, max_segs, b_size, order, got;  	gfp_t priority;  	if (new_size <= STbuffer->buffer_size) @@ -3729,9 +3736,6 @@ static int enlarge_buffer(struct st_buffer * STbuffer, int new_size, int need_dm  		normalize_buffer(STbuffer);  /* Avoid extra segment */  	max_segs = STbuffer->use_sg; -	nbr = max_segs - STbuffer->frp_segs; -	if (nbr <= 0) -		return 0;  	priority = GFP_KERNEL | __GFP_NOWARN;  	if (need_dma) diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 325c31caa6e..1aa4befcfbd 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -1790,8 +1790,6 @@ static void stex_remove(struct pci_dev *pdev)  	scsi_remove_host(hba->host); -	pci_set_drvdata(pdev, NULL); -  	stex_hba_stop(hba);  	stex_hba_free(hba); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 1a28f563279..9969fa1ef7c 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)  {  	struct stor_mem_pools *memp = sdevice->hostdata; +	if (!memp) +		return; +  	mempool_destroy(memp->request_mempool);  	kmem_cache_destroy(memp->request_pool);  	kfree(memp); @@ -1697,6 +1700,7 @@ static struct scsi_host_template scsi_driver = {  	.use_clustering =	DISABLE_CLUSTERING,  	/* Make sure we dont get a sg segment crosses a page boundary */  	.dma_boundary =		PAGE_SIZE-1, +	.no_write_same =	1,  };  enum { diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 636bbe0ea84..88220794cc9 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)  	return( 0 );      if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=  	TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { -	
TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n", +	dprintk(NDEBUG_TAGS,  "scsi%d: target %d lun %d: no free tags\n",  		    H_NO(cmd), cmd->device->id, cmd->device->lun );  	return( 1 );      } @@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)  	!setup_use_tagged_queuing || !cmd->device->tagged_supported) {  	cmd->tag = TAG_NONE;  	hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); -	TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged " +	dprintk(NDEBUG_TAGS,  "scsi%d: target %d lun %d now allocated by untagged "  		    "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );      }      else { @@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)  	cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );  	set_bit( cmd->tag, &ta->allocated );  	ta->nr_allocated++; -	TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d " +	dprintk(NDEBUG_TAGS,  "scsi%d: using tag %d for target %d lun %d "  		    "(now %d tags in use)\n",  		    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,  		    ta->nr_allocated ); @@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)      if (cmd->tag == TAG_NONE) {  	hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); -	TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n", +	dprintk(NDEBUG_TAGS,  "scsi%d: target %d lun %d untagged cmd finished\n",  		    H_NO(cmd), cmd->device->id, cmd->device->lun );      }      else if (cmd->tag >= MAX_TAGS) { @@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)  	TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];  	clear_bit( cmd->tag, &ta->allocated );  	ta->nr_allocated--; -	TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n", +	dprintk(NDEBUG_TAGS,  "scsi%d: freed tag %d for target %d lun %d\n",  		    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );      }  } @@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)  #include <linux/delay.h> -#if 1 +#if NDEBUG  static struct {      unsigned char mask;      const char * name;}  @@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)      }  } -#else /* !NDEBUG */ - -/* dummies... */ -__inline__ void NCR5380_print(struct Scsi_Host *instance) { }; -__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { }; -  #endif  /* @@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void)  {      static int done = 0;      if (!done) { -	INI_PRINTK("scsi : NCR5380_all_init()\n"); +	dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");  	done = 1;      }  } @@ -681,8 +675,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)  	Scsi_Cmnd *ptr;  	unsigned long flags; -	NCR_PRINT(NDEBUG_ANY); -	NCR_PRINT_PHASE(NDEBUG_ANY); +	NCR5380_dprint(NDEBUG_ANY, instance); +	NCR5380_dprint_phase(NDEBUG_ANY, instance);  	hostdata = (struct NCR5380_hostdata *)instance->hostdata; @@ -928,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,      local_irq_restore(flags); -    QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), +    dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),  	      (cmd->cmnd[0] == REQUEST_SENSE) ? 
"head" : "tail");      /* If queue_command() is called from an interrupt (real one or bottom @@ -998,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl)  	done = 1;  	if (!hostdata->connected) { -	    MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO ); +	    dprintk(NDEBUG_MAIN,  "scsi%d: not connected\n", HOSTNO );  	    /*  	     * Search through the issue_queue for a command destined  	     * for a target that's not busy. @@ -1012,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl)  	    for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,  		 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { -#if (NDEBUG & NDEBUG_LISTS)  		if (prev != tmp) -		    printk("MAIN tmp=%p   target=%d   busy=%d lun=%d\n", -			   tmp, tmp->target, hostdata->busy[tmp->target], -			   tmp->lun); -#endif +			dprintk(NDEBUG_LISTS, "MAIN tmp=%p   target=%d   busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);  		/*  When we find one, remove it from the issue queue. */  		/* ++guenther: possible race with Falcon locking */  		if ( @@ -1047,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl)  		     * On failure, we must add the command back to the  		     *   issue queue so we can keep trying.	  		     */ -		    MAIN_PRINTK("scsi%d: main(): command for target %d " +		    dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "  				"lun %d removed from issue_queue\n", -				HOSTNO, tmp->target, tmp->lun); +				HOSTNO, tmp->device->id, tmp->device->lun);  		    /*   		     * REQUEST SENSE commands are issued without tagged  		     * queueing, even on SCSI-II devices because the  @@ -1076,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl)  			cmd_free_tag( tmp );  #endif  			local_irq_restore(flags); -			MAIN_PRINTK("scsi%d: main(): select() failed, " +			dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "  				    "returned to issue_queue\n", HOSTNO);  			if (hostdata->connected)  			    break; @@ -1090,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl)  #endif  	    ) {  	    local_irq_restore(flags); -	    MAIN_PRINTK("scsi%d: main: performing information transfer\n", +	    dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",  			HOSTNO);  	    NCR5380_information_transfer(instance); -	    MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); +	    dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);  	    done = 0;  	}      } while (!done); @@ -1130,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance )  	return;      } -    DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", +    dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",  	       HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),  	       NCR5380_read(STATUS_REG)); @@ -1189,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)      int done = 1, handled = 0;      unsigned char basr; -    INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); +    dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);      /* Look for pending interrupts */      basr = NCR5380_read(BUS_AND_STATUS_REG); -    INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); +    dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);      /* dispatch to appropriate routine if found and done=0 */      if (basr & BASR_IRQ) { -	NCR_PRINT(NDEBUG_INTR); +	NCR5380_dprint(NDEBUG_INTR, instance);  	if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {  	    
done = 0;  //	    ENABLE_IRQ(); -	    INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); +	    dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);  	    NCR5380_reselect(instance);  	    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);  	}  	else if (basr & BASR_PARITY_ERROR) { -	    INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); +	    dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);  	    (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);  	}  	else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { -	    INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); +	    dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);  	    (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);  	}  	else { @@ -1229,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)  		((basr & BASR_END_DMA_TRANSFER) ||   		 !(basr & BASR_PHASE_MATCH))) { -		INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); +		dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);  		NCR5380_dma_complete( instance );  		done = 0;  //		ENABLE_IRQ(); @@ -1238,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)  	    {  /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */  		if (basr & BASR_PHASE_MATCH) -		   INT_PRINTK("scsi%d: unknown interrupt, " +		   dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "  			   "BASR 0x%x, MR 0x%x, SR 0x%x\n",  			   HOSTNO, basr, NCR5380_read(MODE_REG),  			   NCR5380_read(STATUS_REG)); @@ -1262,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)      }      if (!done) { -	INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); +	dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);  	/* Put a call to NCR5380_main() on the queue... */  	queue_main();      } @@ -1338,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,      unsigned long flags;      hostdata->restart_select = 0; -    NCR_PRINT(NDEBUG_ARBITRATION); -    ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, +    NCR5380_dprint(NDEBUG_ARBITRATION, instance); +    dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,  	       instance->this_id);      /*  @@ -1385,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,  	 && !hostdata->connected);  #endif -    ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); +    dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);      if (hostdata->connected) {  	NCR5380_write(MODE_REG, MR_BASE);  @@ -1406,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,  	(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||  	hostdata->connected) {  	NCR5380_write(MODE_REG, MR_BASE);  -	ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", +	dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",  		   HOSTNO);  	return -1;      } @@ -1421,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,  	hostdata->connected) {  	NCR5380_write(MODE_REG, MR_BASE);  	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -	ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", +	dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",  		   HOSTNO);  	return -1;      } @@ -1444,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,  	return -1;      } -    ARB_PRINTK("scsi%d: won 
arbitration\n", HOSTNO); +    dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);      /*        * Now that we have won arbitration, start Selection process, asserting  @@ -1504,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,      udelay(1); -    SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); +    dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);      /*        * The SCSI specification calls for a 250 ms timeout for the actual  @@ -1559,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,  	    printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);  	    if (hostdata->restart_select)  		printk(KERN_NOTICE "\trestart select\n"); -	    NCR_PRINT(NDEBUG_ANY); +	    NCR5380_dprint(NDEBUG_ANY, instance);  	    NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);  	    return -1;  	} @@ -1572,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,  #endif  	cmd->scsi_done(cmd);  	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); -	SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); +	dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);  	NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);  	return 0;      }  @@ -1597,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,      /* Wait for start of REQ/ACK handshake */      while (!(NCR5380_read(STATUS_REG) & SR_REQ)); -    SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", +    dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",  	       HOSTNO, cmd->device->id);      tmp[0] = IDENTIFY(1, cmd->device->lun); @@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,      data = tmp;      phase = PHASE_MSGOUT;      NCR5380_transfer_pio(instance, &phase, &len, &data); -    SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); +    dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);      /* XXX need to handle errors here */      hostdata->connected = cmd;  #ifndef SUPPORT_TAGS @@ -1680,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,  	 */  	while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); -	HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); +	dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);  	/* Check for phase mismatch */	  	if ((tmp & PHASE_MASK) != p) { -	    PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); -	    NCR_PRINT_PHASE(NDEBUG_PIO); +	    dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO); +	    NCR5380_dprint_phase(NDEBUG_PIO, instance);  	    break;  	} @@ -1708,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,  	    if (!((p & SR_MSG) && c > 1)) {  		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |   		    ICR_ASSERT_DATA); -		NCR_PRINT(NDEBUG_PIO); +		NCR5380_dprint(NDEBUG_PIO, instance);  		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |   			ICR_ASSERT_DATA | ICR_ASSERT_ACK);  	    } else {  		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |  		    ICR_ASSERT_DATA | ICR_ASSERT_ATN); -		NCR_PRINT(NDEBUG_PIO); +		NCR5380_dprint(NDEBUG_PIO, instance);  		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |   		    ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);  	    }  	} else { -	    NCR_PRINT(NDEBUG_PIO); +	    NCR5380_dprint(NDEBUG_PIO, instance);  	    NCR5380_write(INITIATOR_COMMAND_REG, 
ICR_BASE | ICR_ASSERT_ACK);  	}  	while (NCR5380_read(STATUS_REG) & SR_REQ); -	HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); +	dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);  /*   * We have several special cases to consider during REQ/ACK handshaking :  @@ -1746,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,  	}       } while (--c); -    PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); +    dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);      *count = c;      *data = d; @@ -1854,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance,      }      hostdata->dma_len = c; -    DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", +    dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",  	       HOSTNO, (p & SR_IO) ? "reading" : "writing",  	       c, (p & SR_IO) ? "to" : "from", *data); @@ -1931,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  	    phase = (tmp & PHASE_MASK);    	    if (phase != old_phase) {  		old_phase = phase; -		NCR_PRINT_PHASE(NDEBUG_INFORMATION); +		NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);  	    }  	    if(phase == PHASE_CMDOUT) { @@ -1996,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		    --cmd->SCp.buffers_residual;  		    cmd->SCp.this_residual = cmd->SCp.buffer->length;  		    cmd->SCp.ptr = SGADDR(cmd->SCp.buffer); -		    INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", +		    dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",  			       HOSTNO, cmd->SCp.this_residual,  			       cmd->SCp.buffers_residual);  		} @@ -2088,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		    /* Accept message by clearing ACK */  		    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -		    LNK_PRINTK("scsi%d: target %d lun %d linked command " +		    dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "  			       "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);  		    /* Enable reselect interrupts */ @@ -2113,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		     * and don't free it! */  		    cmd->next_link->tag = cmd->tag;  		    cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);  -		    LNK_PRINTK("scsi%d: target %d lun %d linked request " +		    dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "  			       "done, calling scsi_done().\n",  			       HOSTNO, cmd->device->id, cmd->device->lun);  #ifdef NCR5380_STATS @@ -2128,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		    /* Accept message by clearing ACK */  		    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);  		    hostdata->connected = NULL; -		    QU_PRINTK("scsi%d: command for target %d, lun %d " +		    dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "  			      "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);  #ifdef SUPPORT_TAGS  		    cmd_free_tag( cmd ); @@ -2142,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  			/* ++Andreas: the mid level code knows about  			   QUEUE_FULL now. 
*/  			TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; -			TAG_PRINTK("scsi%d: target %d lun %d returned " +			dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "  				   "QUEUE_FULL after %d commands\n",  				   HOSTNO, cmd->device->id, cmd->device->lun,  				   ta->nr_allocated); @@ -2186,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		    if ((cmd->cmnd[0] != REQUEST_SENSE) &&   			(status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {  			scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0); -			ASEN_PRINTK("scsi%d: performing request sense\n", +			dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n",  				    HOSTNO);  			/* this is initialized from initialize_SCp   			cmd->SCp.buffer = NULL; @@ -2198,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  			SET_NEXT(cmd, hostdata->issue_queue);  		        hostdata->issue_queue = (struct scsi_cmnd *) cmd;  		        local_irq_restore(flags); -			QU_PRINTK("scsi%d: REQUEST SENSE added to head of " +			dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "  				  "issue queue\n", H_NO(cmd));  		   } else  #endif /* def AUTOSENSE */ @@ -2238,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  			cmd->device->tagged_supported = 0;  			hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);  			cmd->tag = TAG_NONE; -			TAG_PRINTK("scsi%d: target %d lun %d rejected " +			dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "  				   "QUEUE_TAG message; tagged queuing "  				   "disabled\n",  				   HOSTNO, cmd->device->id, cmd->device->lun); @@ -2255,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		    hostdata->connected = NULL;  		    hostdata->disconnected_queue = cmd;  		    local_irq_restore(flags); -		    QU_PRINTK("scsi%d: command for target %d lun %d was " +		    dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "  			      "moved from connected to the "  			      "disconnected_queue\n", HOSTNO,   			      cmd->device->id, cmd->device->lun); @@ -2308,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		    /* Accept first byte by clearing ACK */  		    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); -		    EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); +		    dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);  		    len = 2;  		    data = extended_msg + 1;  		    phase = PHASE_MSGIN;  		    NCR5380_transfer_pio(instance, &phase, &len, &data); -		    EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, +		    dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,  			       (int)extended_msg[1], (int)extended_msg[2]);  		    if (!len && extended_msg[1] <=  @@ -2326,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  			phase = PHASE_MSGIN;  			NCR5380_transfer_pio(instance, &phase, &len, &data); -			EXT_PRINTK("scsi%d: message received, residual %d\n", +			dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",  				   HOSTNO, len);  			switch (extended_msg[2]) { @@ -2416,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)  		break;  	    default:  		printk("scsi%d: unknown phase\n", HOSTNO); -		NCR_PRINT(NDEBUG_ANY); +		NCR5380_dprint(NDEBUG_ANY, instance);  	    } /* switch(phase) */  	} /* if (tmp * SR_REQ) */       } /* while (1) */ @@ -2458,7 +2448,7 @@ static void 
NCR5380_reselect (struct Scsi_Host *instance)      target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); -    RSL_PRINTK("scsi%d: reselect\n", HOSTNO); +    dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);      /*        * At this point, we have detected that our SCSI ID is on the bus, @@ -2580,14 +2570,14 @@ static void NCR5380_reselect (struct Scsi_Host *instance)  	if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&  	    msg[1] == SIMPLE_QUEUE_TAG)  	    tag = msg[2]; -	TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " +	dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "  		   "reselection\n", HOSTNO, target_mask, lun, tag);      }  #endif      hostdata->connected = tmp; -    RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", -	       HOSTNO, tmp->target, tmp->lun, tmp->tag); +    dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", +	       HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);  } @@ -2622,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)      local_irq_save(flags); -    ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, +    dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,  		NCR5380_read(BUS_AND_STATUS_REG),  		NCR5380_read(STATUS_REG)); @@ -2635,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)      if (hostdata->connected == cmd) { -	ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); +	dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);  /*   * We should perform BSY checking, and make sure we haven't slipped   * into BUS FREE. @@ -2664,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)  #endif  	  local_irq_restore(flags);  	  cmd->scsi_done(cmd); -	  return SCSI_ABORT_SUCCESS; +	  return SUCCESS;  	} else {  /*	  local_irq_restore(flags); */  	  printk("scsi%d: abort of connected command failed!\n", HOSTNO); -	  return SCSI_ABORT_ERROR; +	  return FAILED;  	}      }  #endif @@ -2686,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)  	    SET_NEXT(tmp, NULL);  	    tmp->result = DID_ABORT << 16;  	    local_irq_restore(flags); -	    ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", +	    dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",  			HOSTNO);  	    /* Tagged queuing note: no tag to free here, hasn't been assigned  	     * yet... 
*/  	    tmp->scsi_done(tmp); -	    return SCSI_ABORT_SUCCESS; +	    return SUCCESS;  	}  /*  @@ -2707,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)      if (hostdata->connected) {  	local_irq_restore(flags); -	ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); -        return SCSI_ABORT_SNOOZE; +	dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO); +        return FAILED;      }  /* @@ -2740,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)  	 tmp = NEXT(tmp))           if (cmd == tmp) {              local_irq_restore(flags); -	    ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); +	    dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);              if (NCR5380_select (instance, cmd, (int) cmd->tag))  -		return SCSI_ABORT_BUSY; +		return FAILED; -	    ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); +	    dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);  	    do_abort (instance); @@ -2769,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)  #endif  		    local_irq_restore(flags);  		    tmp->scsi_done(tmp); -		    return SCSI_ABORT_SUCCESS; +		    return SUCCESS;  		}  	} @@ -2786,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)      local_irq_restore(flags);      printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO);  -    return SCSI_ABORT_NOT_RUNNING; +    return FAILED;  } @@ -2795,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)   *    * Purpose : reset the SCSI bus.   * - * Returns : SCSI_RESET_WAKEUP + * Returns : SUCCESS or FAILURE   *   */  @@ -2804,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)      SETUP_HOSTDATA(cmd->device->host);      int           i;      unsigned long flags; -#if 1 +#if defined(RESET_RUN_DONE)      struct scsi_cmnd *connected, *disconnected_queue;  #endif @@ -2826,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)       * through anymore ... */      (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); -#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ -      /* XXX see below                                            XXX */ +	/* MSch 20140115 - looking at the generic NCR5380 driver, all of this +	 * should go. +	 * Catch-22: if we don't clear all queues, the SCSI driver lock will +	 * not be released by atari_scsi_reset()! 
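The other conversion threaded through the abort and reset hunks here: the handlers stop returning the long-obsolete SCSI_ABORT_* / SCSI_RESET_* codes and return the SUCCESS / FAILED values the current error-handling midlayer expects from eh_abort_handler and eh_bus_reset_handler. An illustrative skeleton of that contract -- the helper below is hypothetical, only the return values are the API:

static int example_eh_abort(struct scsi_cmnd *cmd)
{
	if (example_try_abort(cmd))	/* hypothetical: unhook cmd from HW */
		return SUCCESS;		/* midlayer completes the command */

	return FAILED;			/* midlayer escalates toward reset */
}

Note that the old graded answers -- SCSI_ABORT_BUSY, SCSI_ABORT_SNOOZE, SCSI_ABORT_NOT_RUNNING -- all collapse to FAILED; escalation policy now lives entirely in the midlayer.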
+	 */ + +#if defined(RESET_RUN_DONE) +	/* XXX Should now be done by midlevel code, but it's broken XXX */ +	/* XXX see below                                            XXX */      /* MSch: old-style reset: actually abort all command processing here */ @@ -2857,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)       */      if ((cmd = connected)) { -	ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); +	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));  	cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);  	cmd->scsi_done( cmd );      } @@ -2869,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)  	cmd->scsi_done( cmd );      }      if (i > 0) -	ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); +	dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);      /* since all commands have been explicitly terminated, we need to tell       * the midlevel code that the reset was SUCCESSFUL, and there is no        * need to 'wake up' the commands by a request_sense       */ -    return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; +    return SUCCESS;  #else /* 1 */      /* MSch: new-style reset handling: let the mid-level do what it can */ @@ -2903,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)       */      if (hostdata->issue_queue) -	ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); +	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));      if (hostdata->connected)  -	ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); +	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));      if (hostdata->disconnected_queue) -	ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); +	dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));      local_irq_save(flags);      hostdata->issue_queue = NULL; @@ -2924,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)      local_irq_restore(flags);      /* we did no complete reset of all commands, so a wakeup is required */ -    return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; +    return SUCCESS;  #endif /* 1 */  } diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index e2c009b033c..9707b7494a8 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -3,6 +3,10 @@   *   * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)   * + * VME support added by Sam Creasey + * + * TODO: modify this driver to support multiple Sun3 SCSI VME boards + *   * Adapted from mac_scsinew.c:   */  /* @@ -45,10 +49,6 @@   * USLEEP - enable support for devices that don't disconnect.  Untested.   */ -/* - * $Log: sun3_NCR5380.c,v $ - */ -  #define AUTOSENSE  #include <linux/types.h> @@ -69,23 +69,15 @@  #include <asm/idprom.h>  #include <asm/machines.h> -#define NDEBUG 0 - -#define NDEBUG_ABORT		0x00100000 -#define NDEBUG_TAGS		0x00200000 -#define NDEBUG_MERGING		0x00400000 -  /* dma on! 
*/  #define REAL_DMA  #include "scsi.h" -#include "initio.h"  #include <scsi/scsi_host.h>  #include "sun3_scsi.h" +#include "NCR5380.h" -static void NCR5380_print(struct Scsi_Host *instance); - -/* #define OLDDMA */ +extern int sun3_map_test(unsigned long, char *);  #define USE_WRAPPER  /*#define RESET_BOOT */ @@ -101,7 +93,11 @@ static void NCR5380_print(struct Scsi_Host *instance);  /* #define SUPPORT_TAGS */ +#ifdef SUN3_SCSI_VME +#define ENABLE_IRQ() +#else  #define	ENABLE_IRQ()	enable_irq( IRQ_SUN3_SCSI );  +#endif  static irqreturn_t scsi_sun3_intr(int irq, void *dummy); @@ -123,6 +119,8 @@ module_param(setup_hostid, int, 0);  static struct scsi_cmnd *sun3_dma_setup_done = NULL; +#define	RESET_RUN_DONE +  #define	AFTER_RESET_DELAY	(HZ/2)  /* ms to wait after hitting dma regs */ @@ -136,10 +134,9 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL;  static volatile unsigned char *sun3_scsi_regp;  static volatile struct sun3_dma_regs *dregs; -#ifdef OLDDMA -static unsigned char *dmabuf = NULL; /* dma memory buffer */ -#endif +#ifndef SUN3_SCSI_VME  static struct sun3_udc_regs *udc_regs = NULL; +#endif  static unsigned char *sun3_dma_orig_addr = NULL;  static unsigned long sun3_dma_orig_count = 0;  static int sun3_dma_active = 0; @@ -159,6 +156,7 @@ static inline void sun3scsi_write(int reg, int value)  	sun3_scsi_regp[reg] = value;  } +#ifndef SUN3_SCSI_VME  /* dma controller register access functions */  static inline unsigned short sun3_udc_read(unsigned char reg) @@ -180,6 +178,7 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg)  	dregs->udc_data = val;  	udelay(SUN3_DMA_DELAY);  } +#endif  /*   * XXX: status debug @@ -198,17 +197,32 @@ static struct Scsi_Host *default_instance;   *   */ -int __init sun3scsi_detect(struct scsi_host_template * tpnt) +static int __init sun3scsi_detect(struct scsi_host_template *tpnt)  { -	unsigned long ioaddr; +	unsigned long ioaddr, irq;  	static int called = 0;  	struct Scsi_Host *instance; +#ifdef SUN3_SCSI_VME +	int i; +	unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, +				   IOBASE_SUN3_VMESCSI + 0x4000, +				   0 }; +	unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, +				  SUN3_VEC_VMESCSI1, +				  0 }; +#endif  	/* check that this machine has an onboard 5380 */  	switch(idprom->id_machtype) { +#ifdef SUN3_SCSI_VME +	case SM_SUN3|SM_3_160: +	case SM_SUN3|SM_3_260: +		break; +#else  	case SM_SUN3|SM_3_50:  	case SM_SUN3|SM_3_60:  		break; +#endif  	default:  		return 0; @@ -217,7 +231,11 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)  	if(called)  		return 0; +#ifdef SUN3_SCSI_VME +	tpnt->proc_name = "Sun3 5380 VME SCSI"; +#else  	tpnt->proc_name = "Sun3 5380 SCSI"; +#endif  	/* setup variables */  	tpnt->can_queue = @@ -234,6 +252,38 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)  		tpnt->this_id = 7;  	} +#ifdef SUN3_SCSI_VME +	ioaddr = 0; +	for (i = 0; addrs[i] != 0; i++) { +		unsigned char x; + +		ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, +						     SUN3_PAGE_TYPE_VME16); +		irq = vecs[i]; +		sun3_scsi_regp = (unsigned char *)ioaddr; + +		dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); + +		if (sun3_map_test((unsigned long)dregs, &x)) { +			unsigned short oldcsr; + +			oldcsr = dregs->csr; +			dregs->csr = 0; +			udelay(SUN3_DMA_DELAY); +			if (dregs->csr == 0x1400) +				break; + +			dregs->csr = oldcsr; +		} + +		iounmap((void *)ioaddr); +		ioaddr = 0; +	} + +	if (!ioaddr) +		return 0; +#else +	irq = IRQ_SUN3_SCSI;  	ioaddr = (unsigned 
long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE);  	sun3_scsi_regp = (unsigned char *)ioaddr; @@ -244,11 +294,6 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)  	     printk("SUN3 Scsi couldn't allocate DVMA memory!\n");  	     return 0;  	} -#ifdef OLDDMA -	if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) { -	     printk("SUN3 Scsi couldn't allocate DVMA memory!\n"); -	     return 0; -	}  #endif  #ifdef SUPPORT_TAGS  	if (setup_use_tagged_queuing < 0) @@ -262,7 +307,7 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)  	default_instance = instance;          instance->io_port = (unsigned long) ioaddr; -	instance->irq = IRQ_SUN3_SCSI; +	instance->irq = irq;  	NCR5380_init(instance, 0); @@ -283,7 +328,8 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)  #endif  	} -	printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port); +	pr_info("scsi%d: %s at port %lX irq", instance->host_no, +		tpnt->proc_name, instance->io_port);  	if (instance->irq == SCSI_IRQ_NONE)  		printk ("s disabled");  	else @@ -300,6 +346,15 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)  	dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;  	udelay(SUN3_DMA_DELAY);  	dregs->fifo_count = 0; +#ifdef SUN3_SCSI_VME +	dregs->fifo_count_hi = 0; +	dregs->dma_addr_hi = 0; +	dregs->dma_addr_lo = 0; +	dregs->dma_count_hi = 0; +	dregs->dma_count_lo = 0; + +	dregs->ivect = VME_DATA24 | (instance->irq & 0xff); +#endif  	called = 1; @@ -367,7 +422,8 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance)  }  #endif -const char * sun3scsi_info (struct Scsi_Host *spnt) { +static const char *sun3scsi_info(struct Scsi_Host *spnt) +{      return "";  } @@ -379,6 +435,10 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy)  	unsigned short csr = dregs->csr;  	int handled = 0; +#ifdef SUN3_SCSI_VME +	dregs->csr &= ~CSR_DMA_ENABLE; +#endif +  	if(csr & ~CSR_GOOD) {  		if(csr & CSR_DMA_BUSERR) {  			printk("scsi%d: bus error in dma\n", default_instance->host_no); @@ -422,31 +482,28 @@ void sun3_sun3_debug (void)  /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */  static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)  { -#ifdef OLDDMA -	if(write_flag)  -		memcpy(dmabuf, data, count); -	else { -		sun3_dma_orig_addr = data; -		sun3_dma_orig_count = count; -	} -#else  	void *addr;  	if(sun3_dma_orig_addr != NULL)  		dvma_unmap(sun3_dma_orig_addr); -//	addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); +#ifdef SUN3_SCSI_VME +	addr = (void *)dvma_map_vme((unsigned long) data, count); +#else  	addr = (void *)dvma_map((unsigned long) data, count); +#endif  	sun3_dma_orig_addr = addr;  	sun3_dma_orig_count = count; -#endif + +#ifndef SUN3_SCSI_VME  	dregs->fifo_count = 0;  	sun3_udc_write(UDC_RESET, UDC_CSR);  	/* reset fifo */  	dregs->csr &= ~CSR_FIFO;  	dregs->csr |= CSR_FIFO; +#endif  	/* set direction */  	if(write_flag) @@ -454,6 +511,17 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri  	else  		dregs->csr &= ~CSR_SEND; +#ifdef SUN3_SCSI_VME +	dregs->csr |= CSR_PACK_ENABLE; + +	dregs->dma_addr_hi = ((unsigned long)addr >> 16); +	dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); + +	dregs->dma_count_hi = 0; +	dregs->dma_count_lo = 0; +	dregs->fifo_count_hi = 0; +	dregs->fifo_count = 0; +#else  	/* byte count for fifo */  	dregs->fifo_count = count; @@ -467,17 +535,12 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long 
count, int wri  		printk("scsi%d: fifo_mismatch %04x not %04x\n",  		       default_instance->host_no, dregs->fifo_count,  		       (unsigned int) count); -		NCR5380_print(default_instance); +		NCR5380_dprint(NDEBUG_DMA, default_instance);  	}  	/* setup udc */ -#ifdef OLDDMA -	udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8); -	udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff); -#else  	udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8);  	udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff); -#endif  	udc_regs->count = count/2; /* count in words */  	udc_regs->mode_hi = UDC_MODE_HIWORD;  	if(write_flag) { @@ -501,11 +564,13 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri  	/* interrupt enable */  	sun3_udc_write(UDC_INT_ENABLE, UDC_CSR); +#endif         	return count;  } +#ifndef SUN3_SCSI_VME  static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)  {  	unsigned short resid; @@ -518,6 +583,7 @@ static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)  	return (unsigned long) resid;  } +#endif  static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)  { @@ -536,8 +602,23 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,  static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)  { +#ifdef SUN3_SCSI_VME +	unsigned short csr; + +	csr = dregs->csr; +	dregs->dma_count_hi = (sun3_dma_orig_count >> 16); +	dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); + +	dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); +	dregs->fifo_count = (sun3_dma_orig_count & 0xffff); + +/*	if(!(csr & CSR_DMA_ENABLE)) + *		dregs->csr |= CSR_DMA_ENABLE; + */ +#else      sun3_udc_write(UDC_CHN_START, UDC_CSR); +#endif      return 0;  } @@ -545,12 +626,46 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)  /* clean up after our dma is done */  static int sun3scsi_dma_finish(int write_flag)  { -	unsigned short count; +	unsigned short __maybe_unused count;  	unsigned short fifo;  	int ret = 0;  	sun3_dma_active = 0; -#if 1 + +#ifdef SUN3_SCSI_VME +	dregs->csr &= ~CSR_DMA_ENABLE; + +	fifo = dregs->fifo_count; +	if (write_flag) { +		if ((fifo > 0) && (fifo < sun3_dma_orig_count)) +			fifo++; +	} + +	last_residual = fifo; +	/* empty bytes from the fifo which didn't make it */ +	if ((!write_flag) && (dregs->csr & CSR_LEFT)) { +		unsigned char *vaddr; + +		vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); + +		vaddr += (sun3_dma_orig_count - fifo); +		vaddr--; + +		switch (dregs->csr & CSR_LEFT) { +		case CSR_LEFT_3: +			*vaddr = (dregs->bpack_lo & 0xff00) >> 8; +			vaddr--; + +		case CSR_LEFT_2: +			*vaddr = (dregs->bpack_hi & 0x00ff); +			vaddr--; + +		case CSR_LEFT_1: +			*vaddr = (dregs->bpack_hi & 0xff00) >> 8; +			break; +		} +	} +#else  	// check to empty the fifo on a read  	if(!write_flag) {  		int tmo = 20000; /* .2 sec */ @@ -566,28 +681,8 @@ static int sun3scsi_dma_finish(int write_flag)  			udelay(10);  		}  	} -		 -#endif  	count = sun3scsi_dma_count(default_instance); -#ifdef OLDDMA - -	/* if we've finished a read, copy out the data we read */ - 	if(sun3_dma_orig_addr) { -		/* check for residual bytes after dma end */ -		if(count && (NCR5380_read(BUS_AND_STATUS_REG) & -			     (BASR_PHASE_MATCH | BASR_ACK))) { -			printk("scsi%d: sun3_scsi_finish: read overrun baby... 
", default_instance->host_no); -			printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG)); -			ret = count; -		} -		 -		/* copy in what we dma'd no matter what */ -		memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count); -		sun3_dma_orig_addr = NULL; - -	} -#else  	fifo = dregs->fifo_count;  	last_residual = fifo; @@ -605,10 +700,23 @@ static int sun3scsi_dma_finish(int write_flag)  		vaddr[-2] = (data & 0xff00) >> 8;  		vaddr[-1] = (data & 0xff);  	} +#endif  	dvma_unmap(sun3_dma_orig_addr);  	sun3_dma_orig_addr = NULL; -#endif + +#ifdef SUN3_SCSI_VME +	dregs->dma_addr_hi = 0; +	dregs->dma_addr_lo = 0; +	dregs->dma_count_hi = 0; +	dregs->dma_count_lo = 0; + +	dregs->fifo_count = 0; +	dregs->fifo_count_hi = 0; + +	dregs->csr &= ~CSR_SEND; +/*	dregs->csr |= CSR_DMA_ENABLE; */ +#else  	sun3_udc_write(UDC_RESET, UDC_CSR);  	dregs->fifo_count = 0;  	dregs->csr &= ~CSR_SEND; @@ -616,6 +724,7 @@ static int sun3scsi_dma_finish(int write_flag)  	/* reset fifo */  	dregs->csr &= ~CSR_FIFO;  	dregs->csr |= CSR_FIFO; +#endif  	sun3_dma_setup_done = NULL; diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index a8da9c710fe..e96a37cf06a 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h @@ -29,12 +29,8 @@   * 1+ (800) 334-5454   */ -/* - * $Log: cumana_NCR5380.h,v $ - */ - -#ifndef SUN3_NCR5380_H -#define SUN3_NCR5380_H +#ifndef SUN3_SCSI_H +#define SUN3_SCSI_H  #define SUN3SCSI_PUBLIC_RELEASE 1 @@ -82,8 +78,6 @@ static int sun3scsi_release (struct Scsi_Host *);  #define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"  #endif -#ifndef HOSTS_C -  #define NCR5380_implementation_fields \      int port, ctrl @@ -108,9 +102,6 @@ static int sun3scsi_release (struct Scsi_Host *);  #define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0)  #define NCR5380_dma_residual sun3scsi_dma_residual -#define BOARD_NORMAL	0 -#define BOARD_NCR53C400	1 -  /* additional registers - mainly DMA control regs */  /* these start at regbase + 8 -- directly after the NCR regs */  struct sun3_dma_regs { @@ -191,189 +182,5 @@ struct sun3_udc_regs {  #define VME_DATA24 0x3d00 -// debugging printk's, taken from atari_scsi.h  -/* Debugging printk definitions: - * - *  ARB  -> arbitration - *  ASEN -> auto-sense - *  DMA  -> DMA - *  HSH  -> PIO handshake - *  INF  -> information transfer - *  INI  -> initialization - *  INT  -> interrupt - *  LNK  -> linked commands - *  MAIN -> NCR5380_main() control flow - *  NDAT -> no data-out phase - *  NWR  -> no write commands - *  PIO  -> PIO transfers - *  PDMA -> pseudo DMA (unused on Atari) - *  QU   -> queues - *  RSL  -> reselections - *  SEL  -> selections - *  USL  -> usleep cpde (unused on Atari) - *  LBS  -> last byte sent (unused on Atari) - *  RSS  -> restarting of selections - *  EXT  -> extended messages - *  ABRT -> aborting and resetting - *  TAG  -> queue tag handling - *  MER  -> merging of consec. buffers - * - */ - -#include "NCR5380.h" - -#if NDEBUG & NDEBUG_ARBITRATION -#define ARB_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define ARB_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_AUTOSENSE -#define ASEN_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define ASEN_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_DMA -#define DMA_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define DMA_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_HANDSHAKE -#define HSH_PRINTK(format, args...) 
\ -	printk(KERN_DEBUG format , ## args) -#else -#define HSH_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INFORMATION -#define INF_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define INF_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INIT -#define INI_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define INI_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_INTR -#define INT_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define INT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_LINKED -#define LNK_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define LNK_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_MAIN -#define MAIN_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define MAIN_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_NO_DATAOUT -#define NDAT_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define NDAT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_NO_WRITE -#define NWR_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define NWR_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_PIO -#define PIO_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define PIO_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_PSEUDO_DMA -#define PDMA_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define PDMA_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_QUEUES -#define QU_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define QU_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_RESELECTION -#define RSL_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define RSL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_SELECTION -#define SEL_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define SEL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_USLEEP -#define USL_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define USL_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_LAST_BYTE_SENT -#define LBS_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define LBS_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_RESTART_SELECT -#define RSS_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define RSS_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_EXTENDED -#define EXT_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define EXT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_ABORT -#define ABRT_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define ABRT_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_TAGS -#define TAG_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define TAG_PRINTK(format, args...) -#endif -#if NDEBUG & NDEBUG_MERGING -#define MER_PRINTK(format, args...) \ -	printk(KERN_DEBUG format , ## args) -#else -#define MER_PRINTK(format, args...) -#endif - -/* conditional macros for NCR5380_print_{,phase,status} */ - -#define NCR_PRINT(mask)	\ -	((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0) - -#define NCR_PRINT_PHASE(mask) \ -	((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0) - -#define NCR_PRINT_STATUS(mask) \ -	((NDEBUG & (mask)) ? 
NCR5380_print_status(instance) : (void)0) - - - -#endif /* ndef HOSTS_C */ -#endif /* SUN3_NCR5380_H */ +#endif /* SUN3_SCSI_H */ diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c index a3dd55d1d2f..1eeece6e204 100644 --- a/drivers/scsi/sun3_scsi_vme.c +++ b/drivers/scsi/sun3_scsi_vme.c @@ -1,589 +1,3 @@ - /* - * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) - * - * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) - * - * VME support added by Sam Creasey - * - * Adapted from sun3_scsi.c -- see there for other headers - * - * TODO: modify this driver to support multiple Sun3 SCSI VME boards - * - */ - -#define AUTOSENSE - -#include <linux/types.h> -#include <linux/stddef.h> -#include <linux/ctype.h> -#include <linux/delay.h> - -#include <linux/module.h> -#include <linux/signal.h> -#include <linux/ioport.h> -#include <linux/init.h> -#include <linux/blkdev.h> - -#include <asm/io.h> - -#include <asm/sun3ints.h> -#include <asm/dvma.h> -#include <asm/idprom.h> -#include <asm/machines.h> -  #define SUN3_SCSI_VME -#undef SUN3_SCSI_DEBUG - -/* dma on! */ -#define REAL_DMA - -#define NDEBUG 0 - -#define NDEBUG_ABORT		0x00100000 -#define NDEBUG_TAGS		0x00200000 -#define NDEBUG_MERGING		0x00400000 - -#include "scsi.h" -#include "initio.h" -#include <scsi/scsi_host.h> -#include "sun3_scsi.h" - -extern int sun3_map_test(unsigned long, char *); - -#define USE_WRAPPER -/*#define RESET_BOOT */ -#define DRIVER_SETUP - -/* - * BUG can be used to trigger a strange code-size related hang on 2.1 kernels - */ -#ifdef BUG -#undef RESET_BOOT -#undef DRIVER_SETUP -#endif - -/* #define SUPPORT_TAGS */ - -//#define	ENABLE_IRQ()	enable_irq( SUN3_VEC_VMESCSI0 );  -#define ENABLE_IRQ() - - -static irqreturn_t scsi_sun3_intr(int irq, void *dummy); -static inline unsigned char sun3scsi_read(int reg); -static inline void sun3scsi_write(int reg, int value); - -static int setup_can_queue = -1; -module_param(setup_can_queue, int, 0); -static int setup_cmd_per_lun = -1; -module_param(setup_cmd_per_lun, int, 0); -static int setup_sg_tablesize = -1; -module_param(setup_sg_tablesize, int, 0); -#ifdef SUPPORT_TAGS -static int setup_use_tagged_queuing = -1; -module_param(setup_use_tagged_queuing, int, 0); -#endif -static int setup_hostid = -1; -module_param(setup_hostid, int, 0); - -static struct scsi_cmnd *sun3_dma_setup_done = NULL; - -#define	AFTER_RESET_DELAY	(HZ/2) - -/* ms to wait after hitting dma regs */ -#define SUN3_DMA_DELAY 10 - -/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ -#define SUN3_DVMA_BUFSIZE 0xe000 - -/* minimum number of bytes to do dma on */ -#define SUN3_DMA_MINSIZE 128 - -static volatile unsigned char *sun3_scsi_regp; -static volatile struct sun3_dma_regs *dregs; -#ifdef OLDDMA -static unsigned char *dmabuf = NULL; /* dma memory buffer */ -#endif -static unsigned char *sun3_dma_orig_addr = NULL; -static unsigned long sun3_dma_orig_count = 0; -static int sun3_dma_active = 0; -static unsigned long last_residual = 0; - -/* - * NCR 5380 register access functions - */ - -static inline unsigned char sun3scsi_read(int reg) -{ -	return( sun3_scsi_regp[reg] ); -} - -static inline void sun3scsi_write(int reg, int value) -{ -	sun3_scsi_regp[reg] = value; -} - -/* - * XXX: status debug - */ -static struct Scsi_Host *default_instance; - -/* - * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) - * - * Purpose : initializes mac NCR5380 driver based on the - *	command line / compile time port and irq definitions. 
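The deletion beginning here guts sun3_scsi_vme.c: every routine below was a near-verbatim copy of its sun3_scsi.c counterpart. After the patch the file keeps essentially only the two lines shown below (visible in the +#include hunk further down), and the VME-specific behaviour -- the address-probe loop, the ivect setup, the dvma_map_vme() path -- is folded into sun3_scsi.c behind #ifdef SUN3_SCSI_VME, as the hunks above show:

/* drivers/scsi/sun3_scsi_vme.c after this patch, essentially in full: */
#define SUN3_SCSI_VME
#include "sun3_scsi.c"

Compiling the shared source twice, once with the macro defined, yields the onboard and VME drivers from a single copy of the logic; the TODO about supporting multiple VME boards moves up into sun3_scsi.c's header comment.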
- * - * Inputs : tpnt - template for this SCSI adapter. - * - * Returns : 1 if a host adapter was found, 0 if not. - * - */ -  -static int __init sun3scsi_detect(struct scsi_host_template * tpnt) -{ -	unsigned long ioaddr, irq = 0; -	static int called = 0; -	struct Scsi_Host *instance; -	int i; -	unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,  -				   IOBASE_SUN3_VMESCSI + 0x4000, -				   0 }; -	unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, -				  SUN3_VEC_VMESCSI1, -				  0 }; -	/* check that this machine has an onboard 5380 */ -	switch(idprom->id_machtype) { -	case SM_SUN3|SM_3_160: -	case SM_SUN3|SM_3_260: -		break; - -	default: -		return 0; -	} - -	if(called) -		return 0; - -	tpnt->proc_name = "Sun3 5380 VME SCSI"; - -	/* setup variables */ -	tpnt->can_queue = -		(setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; -	tpnt->cmd_per_lun = -		(setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; -	tpnt->sg_tablesize =  -		(setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; -	 -	if (setup_hostid >= 0) -		tpnt->this_id = setup_hostid; -	else { -		/* use 7 as default */ -		tpnt->this_id = 7; -	} -	 -	ioaddr = 0; -	for(i = 0; addrs[i] != 0; i++) { -		unsigned char x; -		 -		ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, -						     SUN3_PAGE_TYPE_VME16); -		irq = vecs[i]; -		sun3_scsi_regp = (unsigned char *)ioaddr; -		 -		dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); -		 -		if(sun3_map_test((unsigned long)dregs, &x)) { -			unsigned short oldcsr; - -			oldcsr = dregs->csr; -			dregs->csr = 0; -			udelay(SUN3_DMA_DELAY); -			if(dregs->csr == 0x1400) -				break; -			 -			dregs->csr = oldcsr; -		} - -		iounmap((void *)ioaddr); -		ioaddr = 0; -	} - -	if(!ioaddr) -		return 0; -	 -#ifdef SUPPORT_TAGS -	if (setup_use_tagged_queuing < 0) -		setup_use_tagged_queuing = USE_TAGGED_QUEUING; -#endif - -	instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); -	if(instance == NULL) -		return 0; -		 -	default_instance = instance; - -        instance->io_port = (unsigned long) ioaddr; -	instance->irq = irq; - -	NCR5380_init(instance, 0); - -	instance->n_io_port = 32; - -        ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; - -	if (request_irq(instance->irq, scsi_sun3_intr, -			0, "Sun3SCSI-5380VME", instance)) { -#ifndef REAL_DMA -		printk("scsi%d: IRQ%d not free, interrupts disabled\n", -		       instance->host_no, instance->irq); -		instance->irq = SCSI_IRQ_NONE; -#else -		printk("scsi%d: IRQ%d not free, bailing out\n", -		       instance->host_no, instance->irq); -		return 0; -#endif -	} - -	printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port); -	if (instance->irq == SCSI_IRQ_NONE) -		printk ("s disabled"); -	else -		printk (" %d", instance->irq); -	printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", -	       instance->can_queue, instance->cmd_per_lun, -	       SUN3SCSI_PUBLIC_RELEASE); -	printk("\nscsi%d:", instance->host_no); -	NCR5380_print_options(instance); -	printk("\n"); - -	dregs->csr = 0; -	udelay(SUN3_DMA_DELAY); -	dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; -	udelay(SUN3_DMA_DELAY); -	dregs->fifo_count = 0; -	dregs->fifo_count_hi = 0; -	dregs->dma_addr_hi = 0; -	dregs->dma_addr_lo = 0; -	dregs->dma_count_hi = 0; -	dregs->dma_count_lo = 0; - -	dregs->ivect = VME_DATA24 | (instance->irq & 0xff); - -	called = 1; - -#ifdef RESET_BOOT -	sun3_scsi_reset_boot(instance); -#endif - -	return 1; -} - -int sun3scsi_release (struct Scsi_Host *shpnt) -{ -	if (shpnt->irq != SCSI_IRQ_NONE) -		
free_irq(shpnt->irq, shpnt); - -	iounmap((void *)sun3_scsi_regp); - -	NCR5380_exit(shpnt); -	return 0; -} - -#ifdef RESET_BOOT -/* - * Our 'bus reset on boot' function - */ - -static void sun3_scsi_reset_boot(struct Scsi_Host *instance) -{ -	unsigned long end; - -	NCR5380_local_declare(); -	NCR5380_setup(instance); -	 -	/* -	 * Do a SCSI reset to clean up the bus during initialization. No -	 * messing with the queues, interrupts, or locks necessary here. -	 */ - -	printk( "Sun3 SCSI: resetting the SCSI bus..." ); - -	/* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ -//       	sun3_disable_irq( IRQ_SUN3_SCSI ); - -	/* get in phase */ -	NCR5380_write( TARGET_COMMAND_REG, -		      PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); - -	/* assert RST */ -	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); - -	/* The min. reset hold time is 25us, so 40us should be enough */ -	udelay( 50 ); - -	/* reset RST and interrupt */ -	NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); -	NCR5380_read( RESET_PARITY_INTERRUPT_REG ); - -	for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) -		barrier(); - -	/* switch on SCSI IRQ again */ -//       	sun3_enable_irq( IRQ_SUN3_SCSI ); - -	printk( " done\n" ); -} -#endif - -static const char * sun3scsi_info (struct Scsi_Host *spnt) { -    return ""; -} - -// safe bits for the CSR -#define CSR_GOOD 0x060f - -static irqreturn_t scsi_sun3_intr(int irq, void *dummy) -{ -	unsigned short csr = dregs->csr; -	int handled = 0; - -	dregs->csr &= ~CSR_DMA_ENABLE; - - -#ifdef SUN3_SCSI_DEBUG -	printk("scsi_intr csr %x\n", csr); -#endif - -	if(csr & ~CSR_GOOD) { -		if(csr & CSR_DMA_BUSERR) { -			printk("scsi%d: bus error in dma\n", default_instance->host_no); -#ifdef SUN3_SCSI_DEBUG -			printk("scsi: residual %x count %x addr %p dmaaddr %x\n",  -			       dregs->fifo_count, -			       dregs->dma_count_lo | (dregs->dma_count_hi << 16), -			       sun3_dma_orig_addr, -			       dregs->dma_addr_lo | (dregs->dma_addr_hi << 16)); -#endif -		} - -		if(csr & CSR_DMA_CONFLICT) { -			printk("scsi%d: dma conflict\n", default_instance->host_no); -		} -		handled = 1; -	} - -	if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { -		NCR5380_intr(irq, dummy); -		handled = 1; -	} - -	return IRQ_RETVAL(handled); -} - -/* - * Debug stuff - to be called on NMI, or sysrq key. 
Use at your own risk;  - * reentering NCR5380_print_status seems to have ugly side effects - */ - -/* this doesn't seem to get used at all -- sam */ -#if 0 -void sun3_sun3_debug (void) -{ -	unsigned long flags; -	NCR5380_local_declare(); - -	if (default_instance) { -			local_irq_save(flags); -			NCR5380_print_status(default_instance); -			local_irq_restore(flags); -	} -} -#endif - - -/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ -static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) -{ -	void *addr; - -	if(sun3_dma_orig_addr != NULL) -		dvma_unmap(sun3_dma_orig_addr); - -//	addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); -	addr = (void *)dvma_map_vme((unsigned long) data, count); -		 -	sun3_dma_orig_addr = addr; -	sun3_dma_orig_count = count; -	 -#ifdef SUN3_SCSI_DEBUG -	printk("scsi: dma_setup addr %p count %x\n", addr, count); -#endif - -//	dregs->fifo_count = 0; -#if 0	 -	/* reset fifo */ -	dregs->csr &= ~CSR_FIFO; -	dregs->csr |= CSR_FIFO; -#endif	 -	/* set direction */ -	if(write_flag) -		dregs->csr |= CSR_SEND; -	else -		dregs->csr &= ~CSR_SEND; -	 -	/* reset fifo */ -//	dregs->csr &= ~CSR_FIFO; -//	dregs->csr |= CSR_FIFO; - -	dregs->csr |= CSR_PACK_ENABLE; - -	dregs->dma_addr_hi = ((unsigned long)addr >> 16); -	dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); -	 -	dregs->dma_count_hi = 0; -	dregs->dma_count_lo = 0; -	dregs->fifo_count_hi = 0; -	dregs->fifo_count = 0; -		 -#ifdef SUN3_SCSI_DEBUG -	printk("scsi: dma_setup done csr %x\n", dregs->csr); -#endif -       	return count; - -} - -static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) -{ -	return last_residual; -} - -static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, -						  struct scsi_cmnd *cmd, -						  int write_flag) -{ -	if (cmd->request->cmd_type == REQ_TYPE_FS) - 		return wanted; -	else -		return 0; -} - -static int sun3scsi_dma_start(unsigned long count, char *data) -{ -	 -	unsigned short csr; - -	csr = dregs->csr; -#ifdef SUN3_SCSI_DEBUG -	printk("scsi: dma_start data %p count %x csr %x fifo %x\n", data, count, csr, dregs->fifo_count); -#endif -	 -	dregs->dma_count_hi = (sun3_dma_orig_count >> 16); -	dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); - -	dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); -	dregs->fifo_count = (sun3_dma_orig_count & 0xffff); - -//	if(!(csr & CSR_DMA_ENABLE)) -//		dregs->csr |= CSR_DMA_ENABLE; - -	return 0; -} - -/* clean up after our dma is done */ -static int sun3scsi_dma_finish(int write_flag) -{ -	unsigned short fifo; -	int ret = 0; -	 -	sun3_dma_active = 0; - -	dregs->csr &= ~CSR_DMA_ENABLE; -	 -	fifo = dregs->fifo_count; -	if(write_flag) { -		if((fifo > 0) && (fifo < sun3_dma_orig_count)) -			fifo++; -	} - -	last_residual = fifo; -#ifdef SUN3_SCSI_DEBUG -	printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count); -#endif -	/* empty bytes from the fifo which didn't make it */ -	if((!write_flag) && (dregs->csr & CSR_LEFT)) { -		unsigned char *vaddr; - -#ifdef SUN3_SCSI_DEBUG -		printk("scsi: got left over bytes\n"); -#endif - -		vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); -		 -		vaddr += (sun3_dma_orig_count - fifo); -		vaddr--; -		 -		switch(dregs->csr & CSR_LEFT) { -		case CSR_LEFT_3: -			*vaddr = (dregs->bpack_lo & 0xff00) >> 8; -			vaddr--; -			 -		case CSR_LEFT_2: -			*vaddr = (dregs->bpack_hi & 0x00ff); -			vaddr--; -			 -		case CSR_LEFT_1: -			*vaddr = (dregs->bpack_hi & 0xff00) >> 8; -			break; -		} -		 -		 -	} - -	
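A subtlety worth flagging in the CSR_LEFT handling just above (present both in the surviving sun3_scsi.c copy and in this deleted one): the switch falls through on purpose. The VME DMA engine packs bytes into 16-bit words, so a read that ends mid-word can leave up to three bytes latched in bpack_hi/bpack_lo, and the cases drain them back-to-front, each arm handing off to the next. A condensed restatement of the same logic:

/* vaddr points at the last byte of the short transfer */
switch (dregs->csr & CSR_LEFT) {
case CSR_LEFT_3:
	*vaddr-- = (dregs->bpack_lo & 0xff00) >> 8;
	/* fall through */
case CSR_LEFT_2:
	*vaddr-- = dregs->bpack_hi & 0x00ff;
	/* fall through */
case CSR_LEFT_1:
	*vaddr = (dregs->bpack_hi & 0xff00) >> 8;
	break;
}

On a current tree each arm would want an explicit fallthrough annotation to keep -Wimplicit-fallthrough quiet.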
dvma_unmap(sun3_dma_orig_addr); -	sun3_dma_orig_addr = NULL; - -	dregs->dma_addr_hi = 0; -	dregs->dma_addr_lo = 0; -	dregs->dma_count_hi = 0; -	dregs->dma_count_lo = 0; - -	dregs->fifo_count = 0; -	dregs->fifo_count_hi = 0; - -	dregs->csr &= ~CSR_SEND; -	 -//	dregs->csr |= CSR_DMA_ENABLE; -	 -#if 0 -	/* reset fifo */ -	dregs->csr &= ~CSR_FIFO; -	dregs->csr |= CSR_FIFO; -#endif	 -	sun3_dma_setup_done = NULL; - -	return ret; - -} - -#include "sun3_NCR5380.c" - -static struct scsi_host_template driver_template = { -	.name			= SUN3_SCSI_NAME, -	.detect			= sun3scsi_detect, -	.release		= sun3scsi_release, -	.info			= sun3scsi_info, -	.queuecommand		= sun3scsi_queue_command, -	.eh_abort_handler      	= sun3scsi_abort, -	.eh_bus_reset_handler  	= sun3scsi_bus_reset, -	.can_queue		= CAN_QUEUE, -	.this_id		= 7, -	.sg_tablesize		= SG_TABLESIZE, -	.cmd_per_lun		= CMD_PER_LUN, -	.use_clustering		= DISABLE_CLUSTERING -}; - - -#include "scsi_module.c" - -MODULE_LICENSE("GPL"); +#include "sun3_scsi.c" diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index bac55f7f69f..6d3ee1ab636 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1531,7 +1531,7 @@ static int sym_iomap_device(struct sym_device *device)  	struct pci_bus_region bus_addr;  	int i = 2; -	pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[1]); +	pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]);  	device->mmio_base = bus_addr.start;  	if (device->chip.features & FE_RAM) { @@ -1541,7 +1541,8 @@ static int sym_iomap_device(struct sym_device *device)  		 */  		if (!pdev->resource[i].flags)  			i++; -		pcibios_resource_to_bus(pdev, &bus_addr, &pdev->resource[i]); +		pcibios_resource_to_bus(pdev->bus, &bus_addr, +					&pdev->resource[i]);  		device->ram_base = bus_addr.start;  	} diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h index b80bf709f10..805369521df 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.h +++ b/drivers/scsi/sym53c8xx_2/sym_glue.h @@ -174,7 +174,7 @@ struct sym_slcb {   */  struct sym_shcb {  	/* -	 *  Chip and controller indentification. +	 *  Chip and controller identification.  	 
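The sym_glue.c hunks just above track a PCI core interface change rather than a driver change: pcibios_resource_to_bus() now takes the struct pci_bus instead of the struct pci_dev, so callers pass pdev->bus. The prototype shift being tracked appears to be the following (check <linux/pci.h> in the target tree):

/* old: void pcibios_resource_to_bus(struct pci_dev *dev,
 *		struct pci_bus_region *region, struct resource *res);
 * new: void pcibios_resource_to_bus(struct pci_bus *bus,
 *		struct pci_bus_region *region, struct resource *res);
 */
pcibios_resource_to_bus(pdev->bus, &bus_addr, &pdev->resource[1]);

Taking the bus rather than a device presumably lets the helper serve resources that have no pci_dev at hand, such as bridge windows; for drivers the conversion is purely mechanical.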
*/  	int		unit;  	char		inst_name[16]; diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index d92fe4037e9..6b349e30186 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c @@ -3000,7 +3000,11 @@ sym_dequeue_from_squeue(struct sym_hcb *np, int i, int target, int lun, int task  		if ((target == -1 || cp->target == target) &&  		    (lun    == -1 || cp->lun    == lun)    &&  		    (task   == -1 || cp->tag    == task)) { +#ifdef SYM_OPT_HANDLE_DEVICE_QUEUEING  			sym_set_cam_status(cp->cmd, DID_SOFT_ERROR); +#else +			sym_set_cam_status(cp->cmd, DID_REQUEUE); +#endif  			sym_remque(&cp->link_ccbq);  			sym_insque_tail(&cp->link_ccbq, &np->comp_ccbq);  		} diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c index f1e4b4148c7..8cc80931df1 100644 --- a/drivers/scsi/t128.c +++ b/drivers/scsi/t128.c @@ -102,10 +102,6 @@   * 15 9-11   */ -/* - * $Log: t128.c,v $ - */ -  #include <linux/signal.h>  #include <linux/io.h>  #include <linux/blkdev.h> @@ -259,7 +255,7 @@ found:  	    instance->irq = NCR5380_probe_irq(instance, T128_IRQS);  	if (instance->irq != SCSI_IRQ_NONE)  -	    if (request_irq(instance->irq, t128_intr, IRQF_DISABLED, "t128", +	    if (request_irq(instance->irq, t128_intr, 0, "t128",  			    instance)) {  		printk("scsi%d : IRQ%d not free, interrupts disabled\n",   		    instance->host_no, instance->irq); diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index 1df82c28e56..fd68cecc62a 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h @@ -34,10 +34,6 @@   * 1+ (800) 334-5454   */ -/* - * $Log: t128.h,v $ - */ -  #ifndef T128_H  #define T128_H @@ -107,8 +103,6 @@ static int t128_bus_reset(struct scsi_cmnd *);  #define CAN_QUEUE 32  #endif -#ifndef HOSTS_C -  #define NCR5380_implementation_fields \      void __iomem *base @@ -148,6 +142,5 @@ static int t128_bus_reset(struct scsi_cmnd *);  #define T128_IRQS 0xc4a8 -#endif /* else def HOSTS_C */  #endif /* ndef ASM */  #endif /* T128_H */ diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 9327f5fcec4..b006cf789ba 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c @@ -521,7 +521,7 @@ dc390_StartSCSI( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_sr  	pACB->SelConn++;  	return 1;      } -    if (time_before (jiffies, pACB->pScsiHost->last_reset)) +    if (time_before (jiffies, pACB->last_reset))      {  	DEBUG0(printk ("DC390: We were just reset and don't accept commands yet!\n"));  	return 1; @@ -1863,7 +1863,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB )      /* delay half a second */      udelay (1000);      DC390_write8 (ScsiCmd, CLEAR_FIFO_CMD); -    pACB->pScsiHost->last_reset = jiffies + 5*HZ/2 +    pACB->last_reset = jiffies + 5*HZ/2  		    + HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];      pACB->Connected = 0; @@ -2048,9 +2048,9 @@ static int DC390_bus_reset (struct scsi_cmnd *cmd)  	dc390_ResetDevParam(pACB);  	mdelay(1); -	pACB->pScsiHost->last_reset = jiffies + 3*HZ/2  +	pACB->last_reset = jiffies + 3*HZ/2  		+ HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY]; -     +  	DC390_write8(ScsiCmd, CLEAR_FIFO_CMD);  	DC390_read8(INT_Status);		/* Reset Pending INT */ @@ -2383,7 +2383,7 @@ static void dc390_init_hw(struct dc390_acb *pACB, u8 index)  	if (pACB->Gmode2 & RST_SCSI_BUS) {  		dc390_ResetSCSIBus(pACB);  		udelay(1000); -		shost->last_reset = jiffies + HZ/2 + +		pACB->last_reset = jiffies + HZ/2 +  			HZ * dc390_eepromBuf[pACB->AdapterIndex][EE_DELAY];  	} @@ -2455,8 +2455,8 @@ 
static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)  	shost->irq = pdev->irq;  	shost->base = io_port;  	shost->unique_id = io_port; -	shost->last_reset = jiffies; -	 + +	pACB->last_reset = jiffies;  	pACB->pScsiHost = shost;  	pACB->IOPortBase = (u16) io_port;  	pACB->IRQLevel = pdev->irq; @@ -2553,7 +2553,6 @@ static void dc390_remove_one(struct pci_dev *dev)  	pci_disable_device(dev);  	scsi_host_put(scsi_host); -	pci_set_drvdata(dev, NULL);  }  static struct pci_device_id tmscsim_pci_tbl[] = { diff --git a/drivers/scsi/tmscsim.h b/drivers/scsi/tmscsim.h index 77adc54dbd1..3d1bb4ad182 100644 --- a/drivers/scsi/tmscsim.h +++ b/drivers/scsi/tmscsim.h @@ -143,6 +143,7 @@ u8		Ignore_IRQ;	/* Not used */  struct pci_dev	*pdev; +unsigned long   last_reset;  unsigned long	Cmds;  u32		SelLost;  u32		SelConn; diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 9c216e56356..5a03bb3bcfe 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -873,7 +873,7 @@ static int port_detect \     /* Board detected, allocate its IRQ */     if (request_irq(irq, do_interrupt_handler, -             IRQF_DISABLED | ((subversion == ESA) ? IRQF_SHARED : 0), +             (subversion == ESA) ? IRQF_SHARED : 0,               driver_name, (void *) &sha[j])) {        printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);        goto freelock; diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 72105009052..f42d1cee652 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -196,9 +196,9 @@ enum {   * @dword_2: UPIU header DW-2   */  struct utp_upiu_header { -	u32 dword_0; -	u32 dword_1; -	u32 dword_2; +	__be32 dword_0; +	__be32 dword_1; +	__be32 dword_2;  };  /** @@ -207,7 +207,7 @@ struct utp_upiu_header {   * @cdb: Command Descriptor Block CDB DW-4 to DW-7   */  struct utp_upiu_cmd { -	u32 exp_data_transfer_len; +	__be32 exp_data_transfer_len;  	u8 cdb[MAX_CDB_SIZE];  }; @@ -228,10 +228,10 @@ struct utp_upiu_query {  	u8 idn;  	u8 index;  	u8 selector; -	u16 reserved_osf; -	u16 length; -	u32 value; -	u32 reserved[2]; +	__be16 reserved_osf; +	__be16 length; +	__be32 value; +	__be32 reserved[2];  };  /** @@ -256,9 +256,9 @@ struct utp_upiu_req {   * @sense_data: Sense data field DW-8 to DW-12   */  struct utp_cmd_rsp { -	u32 residual_transfer_count; -	u32 reserved[4]; -	u16 sense_data_len; +	__be32 residual_transfer_count; +	__be32 reserved[4]; +	__be16 sense_data_len;  	u8 sense_data[18];  }; @@ -286,10 +286,10 @@ struct utp_upiu_rsp {   */  struct utp_upiu_task_req {  	struct utp_upiu_header header; -	u32 input_param1; -	u32 input_param2; -	u32 input_param3; -	u32 reserved[2]; +	__be32 input_param1; +	__be32 input_param2; +	__be32 input_param3; +	__be32 reserved[2];  };  /** @@ -301,9 +301,9 @@ struct utp_upiu_task_req {   */  struct utp_upiu_task_rsp {  	struct utp_upiu_header header; -	u32 output_param1; -	u32 output_param2; -	u32 reserved[3]; +	__be32 output_param1; +	__be32 output_param2; +	__be32 reserved[3];  };  /** diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index a823cf44e94..8b9531204c2 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -132,7 +132,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)  	pm_runtime_forbid(&pdev->dev);  	pm_runtime_get_noresume(&pdev->dev);  	ufshcd_remove(hba); -	pci_set_drvdata(pdev, NULL);  }  /** diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 04884d663e4..0c287725125 100644 --- 
a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -55,6 +55,9 @@  /* Query request timeout */  #define QUERY_REQ_TIMEOUT 30 /* msec */ +/* Task management command timeout */ +#define TM_CMD_TIMEOUT	100 /* msecs */ +  /* Expose the flag value from utp_upiu_query.value */  #define MASK_QUERY_UPIU_FLAG_LOC 0xFF @@ -71,9 +74,22 @@ enum {  /* UFSHCD states */  enum { -	UFSHCD_STATE_OPERATIONAL,  	UFSHCD_STATE_RESET,  	UFSHCD_STATE_ERROR, +	UFSHCD_STATE_OPERATIONAL, +}; + +/* UFSHCD error handling flags */ +enum { +	UFSHCD_EH_IN_PROGRESS = (1 << 0), +}; + +/* UFSHCD UIC layer error flags */ +enum { +	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */ +	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */ +	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */ +	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */  };  /* Interrupt configuration options */ @@ -83,6 +99,18 @@ enum {  	UFSHCD_INT_CLEAR,  }; +#define ufshcd_set_eh_in_progress(h) \ +	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS) +#define ufshcd_eh_in_progress(h) \ +	(h->eh_flags & UFSHCD_EH_IN_PROGRESS) +#define ufshcd_clear_eh_in_progress(h) \ +	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS) + +static void ufshcd_tmc_handler(struct ufs_hba *hba); +static void ufshcd_async_scan(void *data, async_cookie_t cookie); +static int ufshcd_reset_and_restore(struct ufs_hba *hba); +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); +  /*   * ufshcd_wait_for_register - wait for register value to change   * @hba - per-adapter interface @@ -163,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs)   */  static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)  { -	return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS; +	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;  }  /** @@ -176,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)  static inline int  ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)  { -	return task_req_descp->header.dword_2 & MASK_OCS; +	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;  }  /**   * ufshcd_get_tm_free_slot - get a free slot for task management request   * @hba: per adapter instance + * @free_slot: pointer to variable with available slot value   * - * Returns maximum number of task management request slots in case of - * task management queue full or returns the free slot number + * Get a free tag and lock it until ufshcd_put_tm_slot() is called. + * Returns 0 if free slot is not available, else return 1 with tag value + * in @free_slot.   
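The kerneldoc above describes a find-and-lock loop: find_first_zero_bit() proposes a candidate tag, and test_and_set_bit_lock() confirms it only if no other CPU claimed it in the meantime. Reduced to a standalone sketch (claim_tag and its parameters are illustrative names, not the driver's code):

	/* Lock-free tag allocation over a bitmap of nr_tags slots. */
	static bool claim_tag(unsigned long *map, int nr_tags, int *out)
	{
		int tag;

		do {
			tag = find_first_zero_bit(map, nr_tags);
			if (tag >= nr_tags)
				return false;	/* every tag is in use */
			/*
			 * test_and_set_bit_lock() returns the old bit value,
			 * so a non-zero result means another CPU raced us to
			 * this tag and the search must restart.
			 */
		} while (test_and_set_bit_lock(tag, map));

		*out = tag;
		return true;
	}

Release is the mirror image: clear_bit_unlock(tag, map) followed by a wake_up() on the queue that would-be claimants sleep on.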
*/ -static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba) +static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)  { -	return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs); +	int tag; +	bool ret = false; + +	if (!free_slot) +		goto out; + +	do { +		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); +		if (tag >= hba->nutmrs) +			goto out; +	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); + +	*free_slot = tag; +	ret = true; +out: +	return ret; +} + +static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) +{ +	clear_bit_unlock(slot, &hba->tm_slots_in_use);  }  /** @@ -390,26 +440,6 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)  }  /** - * ufshcd_query_to_cpu() - formats the buffer to native cpu endian - * @response: upiu query response to convert - */ -static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response) -{ -	response->length = be16_to_cpu(response->length); -	response->value = be32_to_cpu(response->value); -} - -/** - * ufshcd_query_to_be() - formats the buffer to big endian - * @request: upiu query request to convert - */ -static inline void ufshcd_query_to_be(struct utp_upiu_query *request) -{ -	request->length = cpu_to_be16(request->length); -	request->value = cpu_to_be32(request->value); -} - -/**   * ufshcd_copy_query_response() - Copy the Query Response and the data   * descriptor   * @hba: per adapter instance @@ -425,7 +455,6 @@ void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)  			UPIU_RSP_CODE_OFFSET;  	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE); -	ufshcd_query_to_cpu(&query_res->upiu_res);  	/* Get the descriptor */ @@ -749,7 +778,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,  {  	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;  	struct ufs_query *query = &hba->dev_cmd.query; -	u16 len = query->request.upiu_req.length; +	u16 len = be16_to_cpu(query->request.upiu_req.length);  	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;  	/* Query request header */ @@ -766,7 +795,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,  	/* Copy the Query Request buffer as is */  	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,  			QUERY_OSF_SIZE); -	ufshcd_query_to_be(&ucd_req_ptr->qr);  	/* Copy the Descriptor */  	if ((len > 0) && (query->request.upiu_req.opcode == @@ -853,10 +881,25 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)  	tag = cmd->request->tag; -	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) { +	spin_lock_irqsave(hba->host->host_lock, flags); +	switch (hba->ufshcd_state) { +	case UFSHCD_STATE_OPERATIONAL: +		break; +	case UFSHCD_STATE_RESET:  		err = SCSI_MLQUEUE_HOST_BUSY; -		goto out; +		goto out_unlock; +	case UFSHCD_STATE_ERROR: +		set_host_byte(cmd, DID_ERROR); +		cmd->scsi_done(cmd); +		goto out_unlock; +	default: +		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n", +				__func__, hba->ufshcd_state); +		set_host_byte(cmd, DID_BAD_TARGET); +		cmd->scsi_done(cmd); +		goto out_unlock;  	} +	spin_unlock_irqrestore(hba->host->host_lock, flags);  	/* acquire the tag to make sure device cmds don't use it */  	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { @@ -893,6 +936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)  	/* issue command to the controller */  	spin_lock_irqsave(hba->host->host_lock, flags);  	ufshcd_send_command(hba, tag); +out_unlock:  	
spin_unlock_irqrestore(hba->host->host_lock, flags);  out:  	return err; @@ -1151,7 +1195,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,  	}  	if (flag_res) -		*flag_res = (response->upiu_res.value & +		*flag_res = (be32_to_cpu(response->upiu_res.value) &  				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;  out_unlock: @@ -1170,7 +1214,7 @@ out_unlock:   *   * Returns 0 for success, non-zero in case of failure  */ -int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, +static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,  			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)  {  	struct ufs_query_req *request; @@ -1195,7 +1239,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,  	switch (opcode) {  	case UPIU_QUERY_OPCODE_WRITE_ATTR:  		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST; -		request->upiu_req.value = *attr_val; +		request->upiu_req.value = cpu_to_be32(*attr_val);  		break;  	case UPIU_QUERY_OPCODE_READ_ATTR:  		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST; @@ -1222,7 +1266,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,  		goto out_unlock;  	} -	*attr_val = response->upiu_res.value; +	*attr_val = be32_to_cpu(response->upiu_res.value);  out_unlock:  	mutex_unlock(&hba->dev_cmd.lock); @@ -1481,7 +1525,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);   *   * Returns 0 on success, non-zero value on failure   */ -int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) +static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)  {  	struct uic_command uic_cmd = {0};  	struct completion pwr_done; @@ -1701,11 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)  		goto out;  	} -	if (hba->ufshcd_state == UFSHCD_STATE_RESET) -		scsi_unblock_requests(hba->host); - -	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; -  out:  	return err;  } @@ -1831,66 +1870,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)  }  /** - * ufshcd_do_reset - reset the host controller - * @hba: per adapter instance - * - * Returns SUCCESS/FAILED - */ -static int ufshcd_do_reset(struct ufs_hba *hba) -{ -	struct ufshcd_lrb *lrbp; -	unsigned long flags; -	int tag; - -	/* block commands from midlayer */ -	scsi_block_requests(hba->host); - -	spin_lock_irqsave(hba->host->host_lock, flags); -	hba->ufshcd_state = UFSHCD_STATE_RESET; - -	/* send controller to reset state */ -	ufshcd_hba_stop(hba); -	spin_unlock_irqrestore(hba->host->host_lock, flags); - -	/* abort outstanding commands */ -	for (tag = 0; tag < hba->nutrs; tag++) { -		if (test_bit(tag, &hba->outstanding_reqs)) { -			lrbp = &hba->lrb[tag]; -			if (lrbp->cmd) { -				scsi_dma_unmap(lrbp->cmd); -				lrbp->cmd->result = DID_RESET << 16; -				lrbp->cmd->scsi_done(lrbp->cmd); -				lrbp->cmd = NULL; -				clear_bit_unlock(tag, &hba->lrb_in_use); -			} -		} -	} - -	/* complete device management command */ -	if (hba->dev_cmd.complete) -		complete(hba->dev_cmd.complete); - -	/* clear outstanding request/task bit maps */ -	hba->outstanding_reqs = 0; -	hba->outstanding_tasks = 0; - -	/* Host controller enable */ -	if (ufshcd_hba_enable(hba)) { -		dev_err(hba->dev, -			"Reset: Controller initialization failed\n"); -		return FAILED; -	} - -	if (ufshcd_link_startup(hba)) { -		dev_err(hba->dev, -			"Reset: Link start-up failed\n"); -		return FAILED; -	} - -	return SUCCESS; -} - -/**   * ufshcd_slave_alloc - handle initial SCSI device configurations   * @sdev: pointer to SCSI device   * @@ -1907,6 
+1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)  	sdev->use_10_for_ms = 1;  	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG); +	/* allow SCSI layer to restart the device in case of errors */ +	sdev->allow_restart = 1; +  	/*  	 * Inform SCSI Midlayer that the LUN queue depth is same as the  	 * controller queue depth. If a LUN queue depth is less than the @@ -1934,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)   * ufshcd_task_req_compl - handle task management request completion   * @hba: per adapter instance   * @index: index of the completed request + * @resp: task management service response   * - * Returns SUCCESS/FAILED + * Returns non-zero value on error, zero on success   */ -static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index) +static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)  {  	struct utp_task_req_desc *task_req_descp;  	struct utp_upiu_task_rsp *task_rsp_upiup; @@ -1958,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)  				task_req_descp[index].task_rsp_upiu;  		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);  		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); - -		if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL && -		    task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) -			task_result = FAILED; -		else -			task_result = SUCCESS; +		if (resp) +			*resp = (u8)task_result;  	} else { -		task_result = FAILED; -		dev_err(hba->dev, -			"trc: Invalid ocs = %x\n", ocs_value); +		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", +				__func__, ocs_value);  	}  	spin_unlock_irqrestore(hba->host->host_lock, flags); -	return task_result; + +	return ocs_value;  }  /** @@ -2105,6 +2084,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)  	case OCS_ABORTED:  		result |= DID_ABORT << 16;  		break; +	case OCS_INVALID_COMMAND_STATUS: +		result |= DID_REQUEUE << 16; +		break;  	case OCS_INVALID_CMD_TABLE_ATTR:  	case OCS_INVALID_PRDT_ATTR:  	case OCS_MISMATCH_DATA_BUF_SIZE: @@ -2422,41 +2404,145 @@ out:  }  /** - * ufshcd_fatal_err_handler - handle fatal errors - * @hba: per adapter instance + * ufshcd_err_handler - handle UFS errors that require s/w attention + * @work: pointer to work structure   */ -static void ufshcd_fatal_err_handler(struct work_struct *work) +static void ufshcd_err_handler(struct work_struct *work)  {  	struct ufs_hba *hba; -	hba = container_of(work, struct ufs_hba, feh_workq); +	unsigned long flags; +	u32 err_xfer = 0; +	u32 err_tm = 0; +	int err = 0; +	int tag; + +	hba = container_of(work, struct ufs_hba, eh_work);  	pm_runtime_get_sync(hba->dev); -	/* check if reset is already in progress */ -	if (hba->ufshcd_state != UFSHCD_STATE_RESET) -		ufshcd_do_reset(hba); + +	spin_lock_irqsave(hba->host->host_lock, flags); +	if (hba->ufshcd_state == UFSHCD_STATE_RESET) { +		spin_unlock_irqrestore(hba->host->host_lock, flags); +		goto out; +	} + +	hba->ufshcd_state = UFSHCD_STATE_RESET; +	ufshcd_set_eh_in_progress(hba); + +	/* Complete requests that have door-bell cleared by h/w */ +	ufshcd_transfer_req_compl(hba); +	ufshcd_tmc_handler(hba); +	spin_unlock_irqrestore(hba->host->host_lock, flags); + +	/* Clear pending transfer requests */ +	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) +		if (ufshcd_clear_cmd(hba, tag)) +			err_xfer |= 1 << tag; + +	/* Clear pending task management requests */ +	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) +		if (ufshcd_clear_tm_cmd(hba, tag)) +			err_tm |= 1 << tag; + +	
/* Complete the requests that are cleared by s/w */ +	spin_lock_irqsave(hba->host->host_lock, flags); +	ufshcd_transfer_req_compl(hba); +	ufshcd_tmc_handler(hba); +	spin_unlock_irqrestore(hba->host->host_lock, flags); + +	/* Fatal errors need reset */ +	if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) || +			((hba->saved_err & UIC_ERROR) && +			 (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) { +		err = ufshcd_reset_and_restore(hba); +		if (err) { +			dev_err(hba->dev, "%s: reset and restore failed\n", +					__func__); +			hba->ufshcd_state = UFSHCD_STATE_ERROR; +		} +		/* +		 * Inform scsi mid-layer that we did reset and allow to handle +		 * Unit Attention properly. +		 */ +		scsi_report_bus_reset(hba->host, 0); +		hba->saved_err = 0; +		hba->saved_uic_err = 0; +	} +	ufshcd_clear_eh_in_progress(hba); + +out: +	scsi_unblock_requests(hba->host);  	pm_runtime_put_sync(hba->dev);  }  /** - * ufshcd_err_handler - Check for fatal errors - * @work: pointer to a work queue structure + * ufshcd_update_uic_error - check and set fatal UIC error flags. + * @hba: per-adapter instance   */ -static void ufshcd_err_handler(struct ufs_hba *hba) +static void ufshcd_update_uic_error(struct ufs_hba *hba)  {  	u32 reg; +	/* PA_INIT_ERROR is fatal and needs UIC reset */ +	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); +	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) +		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + +	/* UIC NL/TL/DME errors needs software retry */ +	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); +	if (reg) +		hba->uic_error |= UFSHCD_UIC_NL_ERROR; + +	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); +	if (reg) +		hba->uic_error |= UFSHCD_UIC_TL_ERROR; + +	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); +	if (reg) +		hba->uic_error |= UFSHCD_UIC_DME_ERROR; + +	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", +			__func__, hba->uic_error); +} + +/** + * ufshcd_check_errors - Check for errors that need s/w attention + * @hba: per-adapter instance + */ +static void ufshcd_check_errors(struct ufs_hba *hba) +{ +	bool queue_eh_work = false; +  	if (hba->errors & INT_FATAL_ERRORS) -		goto fatal_eh; +		queue_eh_work = true;  	if (hba->errors & UIC_ERROR) { -		reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); -		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) -			goto fatal_eh; +		hba->uic_error = 0; +		ufshcd_update_uic_error(hba); +		if (hba->uic_error) +			queue_eh_work = true;  	} -	return; -fatal_eh: -	hba->ufshcd_state = UFSHCD_STATE_ERROR; -	schedule_work(&hba->feh_workq); + +	if (queue_eh_work) { +		/* handle fatal errors only when link is functional */ +		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) { +			/* block commands from scsi mid-layer */ +			scsi_block_requests(hba->host); + +			/* transfer error masks to sticky bits */ +			hba->saved_err |= hba->errors; +			hba->saved_uic_err |= hba->uic_error; + +			hba->ufshcd_state = UFSHCD_STATE_ERROR; +			schedule_work(&hba->eh_work); +		} +	} +	/* +	 * if (!queue_eh_work) - +	 * Other errors are either non-fatal where host recovers +	 * itself without s/w intervention or errors that will be +	 * handled by the SCSI core layer. 
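One pattern above is worth isolating: the interrupt path only latches state (saved_err and saved_uic_err act as sticky masks) and defers the heavy recovery to eh_work, so nothing sleeps in interrupt context. A minimal sketch of the idiom, with generic names rather than the driver's fields:

	/* IRQ path: latch what happened, let the worker act on it. */
	spin_lock(&lock);
	if (state == STATE_OPERATIONAL) {
		saved_err |= cur_err;		/* sticky until handled */
		state = STATE_ERROR;		/* gate new submissions */
		schedule_work(&eh_work);	/* recovery runs in process context */
	}
	spin_unlock(&lock);

The worker clears the sticky masks only after a successful reset, so an error that strikes while recovery is already queued is never lost.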
+	 */  }  /** @@ -2469,7 +2555,7 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba)  	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);  	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; -	wake_up_interruptible(&hba->ufshcd_tm_wait_queue); +	wake_up(&hba->tm_wq);  }  /** @@ -2481,7 +2567,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)  {  	hba->errors = UFSHCD_ERROR_MASK & intr_status;  	if (hba->errors) -		ufshcd_err_handler(hba); +		ufshcd_check_errors(hba);  	if (intr_status & UFSHCD_UIC_MASK)  		ufshcd_uic_cmd_compl(hba, intr_status); @@ -2519,38 +2605,58 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)  	return retval;  } +static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag) +{ +	int err = 0; +	u32 mask = 1 << tag; +	unsigned long flags; + +	if (!test_bit(tag, &hba->outstanding_tasks)) +		goto out; + +	spin_lock_irqsave(hba->host->host_lock, flags); +	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR); +	spin_unlock_irqrestore(hba->host->host_lock, flags); + +	/* poll for max. 1 sec to clear door bell register by h/w */ +	err = ufshcd_wait_for_register(hba, +			REG_UTP_TASK_REQ_DOOR_BELL, +			mask, 0, 1000, 1000); +out: +	return err; +} +  /**   * ufshcd_issue_tm_cmd - issues task management commands to controller   * @hba: per adapter instance - * @lrbp: pointer to local reference block + * @lun_id: LUN ID to which TM command is sent + * @task_id: task ID to which the TM command is applicable + * @tm_function: task management function opcode + * @tm_response: task management service response return value   * - * Returns SUCCESS/FAILED + * Returns non-zero value on error, zero on success.   */ -static int -ufshcd_issue_tm_cmd(struct ufs_hba *hba, -		    struct ufshcd_lrb *lrbp, -		    u8 tm_function) +static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, +		u8 tm_function, u8 *tm_response)  {  	struct utp_task_req_desc *task_req_descp;  	struct utp_upiu_task_req *task_req_upiup;  	struct Scsi_Host *host;  	unsigned long flags; -	int free_slot = 0; +	int free_slot;  	int err; +	int task_tag;  	host = hba->host; -	spin_lock_irqsave(host->host_lock, flags); - -	/* If task management queue is full */ -	free_slot = ufshcd_get_tm_free_slot(hba); -	if (free_slot >= hba->nutmrs) { -		spin_unlock_irqrestore(host->host_lock, flags); -		dev_err(hba->dev, "Task management queue full\n"); -		err = FAILED; -		goto out; -	} +	/* +	 * Get free slot, sleep if slots are unavailable. +	 * Even though we use wait_event() which sleeps indefinitely, +	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT. 
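The slot wait just below is bounded indirectly: every slot holder is itself limited by wait_event_timeout(), which returns 0 if the condition is still false when the timeout expires and the remaining jiffies otherwise. That return convention is what the !err branch below relies on; in miniature (done_flag and undo_request() are hypothetical):

	long left;

	left = wait_event_timeout(wq, done_flag,
				  msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!left) {
		undo_request();		/* hypothetical: clear the hardware slot */
		return -ETIMEDOUT;
	}
	return 0;			/* condition became true in time */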
+	 */ +	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); +	spin_lock_irqsave(host->host_lock, flags);  	task_req_descp = hba->utmrdl_base_addr;  	task_req_descp += free_slot; @@ -2562,18 +2668,15 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,  	/* Configure task request UPIU */  	task_req_upiup =  		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu; +	task_tag = hba->nutrs + free_slot;  	task_req_upiup->header.dword_0 =  		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0, -					      lrbp->lun, lrbp->task_tag); +					      lun_id, task_tag);  	task_req_upiup->header.dword_1 =  		UPIU_HEADER_DWORD(0, tm_function, 0, 0); -	task_req_upiup->input_param1 = lrbp->lun; -	task_req_upiup->input_param1 = -		cpu_to_be32(task_req_upiup->input_param1); -	task_req_upiup->input_param2 = lrbp->task_tag; -	task_req_upiup->input_param2 = -		cpu_to_be32(task_req_upiup->input_param2); +	task_req_upiup->input_param1 = cpu_to_be32(lun_id); +	task_req_upiup->input_param2 = cpu_to_be32(task_id);  	/* send command to the controller */  	__set_bit(free_slot, &hba->outstanding_tasks); @@ -2582,91 +2685,88 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,  	spin_unlock_irqrestore(host->host_lock, flags);  	/* wait until the task management command is completed */ -	err = -	wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue, -					 (test_bit(free_slot, -					 &hba->tm_condition) != 0), -					 60 * HZ); +	err = wait_event_timeout(hba->tm_wq, +			test_bit(free_slot, &hba->tm_condition), +			msecs_to_jiffies(TM_CMD_TIMEOUT));  	if (!err) { -		dev_err(hba->dev, -			"Task management command timed-out\n"); -		err = FAILED; -		goto out; +		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed out\n", +				__func__, tm_function); +		if (ufshcd_clear_tm_cmd(hba, free_slot)) +			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n", +					__func__, free_slot); +		err = -ETIMEDOUT; +	} else { +		err = ufshcd_task_req_compl(hba, free_slot, tm_response);  	} +  	clear_bit(free_slot, &hba->tm_condition); -	err = ufshcd_task_req_compl(hba, free_slot); -out: +	ufshcd_put_tm_slot(hba, free_slot); +	wake_up(&hba->tm_tag_wq); +  	return err;  }  /** - * ufshcd_device_reset - reset device and abort all the pending commands + * ufshcd_eh_device_reset_handler - device reset handler registered to + *                                    scsi layer.   
* @cmd: SCSI command pointer   *   * Returns SUCCESS/FAILED   */ -static int ufshcd_device_reset(struct scsi_cmnd *cmd) +static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)  {  	struct Scsi_Host *host;  	struct ufs_hba *hba;  	unsigned int tag;  	u32 pos;  	int err; +	u8 resp = 0xF; +	struct ufshcd_lrb *lrbp; +	unsigned long flags;  	host = cmd->device->host;  	hba = shost_priv(host);  	tag = cmd->request->tag; -	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET); -	if (err == FAILED) +	lrbp = &hba->lrb[tag]; +	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp); +	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { +		if (!err) +			err = resp;  		goto out; +	} -	for (pos = 0; pos < hba->nutrs; pos++) { -		if (test_bit(pos, &hba->outstanding_reqs) && -		    (hba->lrb[tag].lun == hba->lrb[pos].lun)) { - -			/* clear the respective UTRLCLR register bit */ -			ufshcd_utrl_clear(hba, pos); - -			clear_bit(pos, &hba->outstanding_reqs); - -			if (hba->lrb[pos].cmd) { -				scsi_dma_unmap(hba->lrb[pos].cmd); -				hba->lrb[pos].cmd->result = -					DID_ABORT << 16; -				hba->lrb[pos].cmd->scsi_done(cmd); -				hba->lrb[pos].cmd = NULL; -				clear_bit_unlock(pos, &hba->lrb_in_use); -				wake_up(&hba->dev_cmd.tag_wq); -			} +	/* clear the commands that were pending for corresponding LUN */ +	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { +		if (hba->lrb[pos].lun == lrbp->lun) { +			err = ufshcd_clear_cmd(hba, pos); +			if (err) +				break;  		} -	} /* end of for */ +	} +	spin_lock_irqsave(host->host_lock, flags); +	ufshcd_transfer_req_compl(hba); +	spin_unlock_irqrestore(host->host_lock, flags);  out: +	if (!err) { +		err = SUCCESS; +	} else { +		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); +		err = FAILED; +	}  	return err;  }  /** - * ufshcd_host_reset - Main reset function registered with scsi layer - * @cmd: SCSI command pointer - * - * Returns SUCCESS/FAILED - */ -static int ufshcd_host_reset(struct scsi_cmnd *cmd) -{ -	struct ufs_hba *hba; - -	hba = shost_priv(cmd->device->host); - -	if (hba->ufshcd_state == UFSHCD_STATE_RESET) -		return SUCCESS; - -	return ufshcd_do_reset(hba); -} - -/**   * ufshcd_abort - abort a specific command   * @cmd: SCSI command pointer   * + * Abort the pending command in device by sending UFS_ABORT_TASK task management + * command, and in host controller by clearing the door-bell register. There can + * be race between controller sending the command to the device while abort is + * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is + * really issued and then try to abort it. 
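Condensed to its decision tree, the probe-then-abort protocol described above looks like the following (query_task(), doorbell() and abort_task() are stand-ins for the TM and register helpers used by the real handler below):

	for (tries = 100; tries; tries--) {
		resp = query_task(tag);			/* UFS_QUERY_TASK */
		if (resp == FUNC_SUCCEEDED)
			break;				/* device owns the command */
		if (resp != FUNC_COMPL)
			return FAILED;			/* unexpected TM response */
		if (!(doorbell() & (1 << tag)))
			return SUCCESS;			/* already completed */
		usleep_range(100, 200);			/* still in flight, re-query */
	}
	if (!tries)
		return FAILED;				/* never settled */
	abort_task(tag);				/* UFS_ABORT_TASK */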
+ *   * Returns SUCCESS/FAILED   */  static int ufshcd_abort(struct scsi_cmnd *cmd) @@ -2675,33 +2775,68 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)  	struct ufs_hba *hba;  	unsigned long flags;  	unsigned int tag; -	int err; +	int err = 0; +	int poll_cnt; +	u8 resp = 0xF; +	struct ufshcd_lrb *lrbp;  	host = cmd->device->host;  	hba = shost_priv(host);  	tag = cmd->request->tag; -	spin_lock_irqsave(host->host_lock, flags); +	/* If command is already aborted/completed, return SUCCESS */ +	if (!(test_bit(tag, &hba->outstanding_reqs))) +		goto out; -	/* check if command is still pending */ -	if (!(test_bit(tag, &hba->outstanding_reqs))) { -		err = FAILED; -		spin_unlock_irqrestore(host->host_lock, flags); +	lrbp = &hba->lrb[tag]; +	for (poll_cnt = 100; poll_cnt; poll_cnt--) { +		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, +				UFS_QUERY_TASK, &resp); +		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) { +			/* cmd pending in the device */ +			break; +		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) { +			u32 reg; + +			/* +			 * cmd not pending in the device, check if it is +			 * in transition. +			 */ +			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); +			if (reg & (1 << tag)) { +				/* sleep for max. 200us to stabilize */ +				usleep_range(100, 200); +				continue; +			} +			/* command completed already */ +			goto out; +		} else { +			if (!err) +				err = resp; /* service response error */ +			goto out; +		} +	} + +	if (!poll_cnt) { +		err = -EBUSY;  		goto out;  	} -	spin_unlock_irqrestore(host->host_lock, flags); -	err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK); -	if (err == FAILED) +	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag, +			UFS_ABORT_TASK, &resp); +	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) { +		if (!err) +			err = resp; /* service response error */ +		goto out; +	} + +	err = ufshcd_clear_cmd(hba, tag); +	if (err)  		goto out;  	scsi_dma_unmap(cmd);  	spin_lock_irqsave(host->host_lock, flags); - -	/* clear the respective UTRLCLR register bit */ -	ufshcd_utrl_clear(hba, tag); -  	__clear_bit(tag, &hba->outstanding_reqs);  	hba->lrb[tag].cmd = NULL;  	spin_unlock_irqrestore(host->host_lock, flags); @@ -2709,6 +2844,129 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)  	clear_bit_unlock(tag, &hba->lrb_in_use);  	wake_up(&hba->dev_cmd.tag_wq);  out: +	if (!err) { +		err = SUCCESS; +	} else { +		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err); +		err = FAILED; +	} + +	return err; +} + +/** + * ufshcd_host_reset_and_restore - reset and restore host controller + * @hba: per-adapter instance + * + * Note that host controller reset may issue DME_RESET to + * local and remote (device) Uni-Pro stack and the attributes + * are reset to default state. 
+ * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) +{ +	int err; +	async_cookie_t cookie; +	unsigned long flags; + +	/* Reset the host controller */ +	spin_lock_irqsave(hba->host->host_lock, flags); +	ufshcd_hba_stop(hba); +	spin_unlock_irqrestore(hba->host->host_lock, flags); + +	err = ufshcd_hba_enable(hba); +	if (err) +		goto out; + +	/* Establish the link again and restore the device */ +	cookie = async_schedule(ufshcd_async_scan, hba); +	/* wait for async scan to be completed */ +	async_synchronize_cookie(++cookie); +	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) +		err = -EIO; +out: +	if (err) +		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); + +	return err; +} + +/** + * ufshcd_reset_and_restore - reset and re-initialize host/device + * @hba: per-adapter instance + * + * Reset and recover device, host and re-establish link. This + * is helpful to recover the communication in fatal error conditions. + * + * Returns zero on success, non-zero on failure + */ +static int ufshcd_reset_and_restore(struct ufs_hba *hba) +{ +	int err = 0; +	unsigned long flags; + +	err = ufshcd_host_reset_and_restore(hba); + +	/* +	 * After reset the door-bell might be cleared, complete +	 * outstanding requests in s/w here. +	 */ +	spin_lock_irqsave(hba->host->host_lock, flags); +	ufshcd_transfer_req_compl(hba); +	ufshcd_tmc_handler(hba); +	spin_unlock_irqrestore(hba->host->host_lock, flags); + +	return err; +} + +/** + * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer + * @cmd - SCSI command pointer + * + * Returns SUCCESS/FAILED + */ +static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd) +{ +	int err; +	unsigned long flags; +	struct ufs_hba *hba; + +	hba = shost_priv(cmd->device->host); + +	/* +	 * Check if there is any race with fatal error handling. +	 * If so, wait for it to complete. Even though fatal error +	 * handling does reset and restore in some cases, don't assume +	 * anything out of it. We are just avoiding race here. 
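The loop that follows is the standard flush-until-quiescent idiom: the check runs under the host lock, but flush_work() runs outside it because flushing can sleep, and the loop exits with the lock still held. Schematically (lock, state and eh_work shown with generic declarations):

	for (;;) {
		spin_lock_irqsave(&lock, flags);
		if (!work_pending(&eh_work) && state != STATE_RESET)
			break;			/* leave with the lock held */
		spin_unlock_irqrestore(&lock, flags);
		flush_work(&eh_work);		/* may sleep; never under a spinlock */
	}
	/* still locked here, so the RESET state can be claimed atomically */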
+	 */ +	do { +		spin_lock_irqsave(hba->host->host_lock, flags); +		if (!(work_pending(&hba->eh_work) || +				hba->ufshcd_state == UFSHCD_STATE_RESET)) +			break; +		spin_unlock_irqrestore(hba->host->host_lock, flags); +		dev_dbg(hba->dev, "%s: reset in progress\n", __func__); +		flush_work(&hba->eh_work); +	} while (1); + +	hba->ufshcd_state = UFSHCD_STATE_RESET; +	ufshcd_set_eh_in_progress(hba); +	spin_unlock_irqrestore(hba->host->host_lock, flags); + +	err = ufshcd_reset_and_restore(hba); + +	spin_lock_irqsave(hba->host->host_lock, flags); +	if (!err) { +		err = SUCCESS; +		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; +	} else { +		err = FAILED; +		hba->ufshcd_state = UFSHCD_STATE_ERROR; +	} +	ufshcd_clear_eh_in_progress(hba); +	spin_unlock_irqrestore(hba->host->host_lock, flags); +  	return err;  } @@ -2737,8 +2995,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)  		goto out;  	ufshcd_force_reset_auto_bkops(hba); -	scsi_scan_host(hba->host); -	pm_runtime_put_sync(hba->dev); +	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; + +	/* If we are in error handling context no need to scan the host */ +	if (!ufshcd_eh_in_progress(hba)) { +		scsi_scan_host(hba->host); +		pm_runtime_put_sync(hba->dev); +	}  out:  	return;  } @@ -2751,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = {  	.slave_alloc		= ufshcd_slave_alloc,  	.slave_destroy		= ufshcd_slave_destroy,  	.eh_abort_handler	= ufshcd_abort, -	.eh_device_reset_handler = ufshcd_device_reset, -	.eh_host_reset_handler	= ufshcd_host_reset, +	.eh_device_reset_handler = ufshcd_eh_device_reset_handler, +	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,  	.this_id		= -1,  	.sg_tablesize		= SG_ALL,  	.cmd_per_lun		= UFSHCD_CMD_PER_LUN, @@ -2916,10 +3179,11 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,  	host->max_cmd_len = MAX_CDB_SIZE;  	/* Initialize wait queue for task management */ -	init_waitqueue_head(&hba->ufshcd_tm_wait_queue); +	init_waitqueue_head(&hba->tm_wq); +	init_waitqueue_head(&hba->tm_tag_wq);  	/* Initialize work queues */ -	INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler); +	INIT_WORK(&hba->eh_work, ufshcd_err_handler);  	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);  	/* Initialize UIC command mutex */ diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 577679a2d18..acf318e338e 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -174,15 +174,21 @@ struct ufs_dev_cmd {   * @irq: Irq number of the controller   * @active_uic_cmd: handle of active UIC command   * @uic_cmd_mutex: mutex for uic command - * @ufshcd_tm_wait_queue: wait queue for task management + * @tm_wq: wait queue for task management + * @tm_tag_wq: wait queue for free task management slots + * @tm_slots_in_use: bit map of task management request slots in use   * @pwr_done: completion for power mode change   * @tm_condition: condition variable for task management   * @ufshcd_state: UFSHCD states + * @eh_flags: Error handling flags   * @intr_mask: Interrupt Mask Bits   * @ee_ctrl_mask: Exception event control mask - * @feh_workq: Work queue for fatal controller error handling + * @eh_work: Worker to handle UFS errors that require s/w attention   * @eeh_work: Worker to handle exception events   * @errors: HBA errors + * @uic_error: UFS interconnect layer error status + * @saved_err: sticky error mask + * @saved_uic_err: sticky UIC error mask   * @dev_cmd: ufs device management command information   * @auto_bkops_enabled: to track whether 
bkops is enabled in device   */ @@ -217,21 +223,27 @@ struct ufs_hba {  	struct uic_command *active_uic_cmd;  	struct mutex uic_cmd_mutex; -	wait_queue_head_t ufshcd_tm_wait_queue; +	wait_queue_head_t tm_wq; +	wait_queue_head_t tm_tag_wq;  	unsigned long tm_condition; +	unsigned long tm_slots_in_use;  	struct completion *pwr_done;  	u32 ufshcd_state; +	u32 eh_flags;  	u32 intr_mask;  	u16 ee_ctrl_mask;  	/* Work Queues */ -	struct work_struct feh_workq; +	struct work_struct eh_work;  	struct work_struct eeh_work;  	/* HBA Errors */  	u32 errors; +	u32 uic_error; +	u32 saved_err; +	u32 saved_uic_err;  	/* Device management request data */  	struct ufs_dev_cmd dev_cmd; @@ -263,6 +275,8 @@ static inline void check_upiu_size(void)  		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);  } +extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state); +extern int ufshcd_resume(struct ufs_hba *hba);  extern int ufshcd_runtime_suspend(struct ufs_hba *hba);  extern int ufshcd_runtime_resume(struct ufs_hba *hba);  extern int ufshcd_runtime_idle(struct ufs_hba *hba); diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index 0475c6619a6..9abc7e32b43 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -304,10 +304,10 @@ enum {   * @size: size of physical segment DW-3   */  struct ufshcd_sg_entry { -	u32    base_addr; -	u32    upper_addr; -	u32    reserved; -	u32    size; +	__le32    base_addr; +	__le32    upper_addr; +	__le32    reserved; +	__le32    size;  };  /** @@ -330,10 +330,10 @@ struct utp_transfer_cmd_desc {   * @dword3: Descriptor Header DW3   */  struct request_desc_header { -	u32 dword_0; -	u32 dword_1; -	u32 dword_2; -	u32 dword_3; +	__le32 dword_0; +	__le32 dword_1; +	__le32 dword_2; +	__le32 dword_3;  };  /** @@ -352,16 +352,16 @@ struct utp_transfer_req_desc {  	struct request_desc_header header;  	/* DW 4-5*/ -	u32  command_desc_base_addr_lo; -	u32  command_desc_base_addr_hi; +	__le32  command_desc_base_addr_lo; +	__le32  command_desc_base_addr_hi;  	/* DW 6 */ -	u16  response_upiu_length; -	u16  response_upiu_offset; +	__le16  response_upiu_length; +	__le16  response_upiu_offset;  	/* DW 7 */ -	u16  prd_table_length; -	u16  prd_table_offset; +	__le16  prd_table_length; +	__le16  prd_table_offset;  };  /** @@ -376,10 +376,10 @@ struct utp_task_req_desc {  	struct request_desc_header header;  	/* DW 4-11 */ -	u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS]; +	__le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];  	/* DW 12-19 */ -	u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS]; +	__le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];  };  #endif /* End of Header */ diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 74b88efde6a..308256b5e4c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -23,6 +23,7 @@  #include <linux/virtio_config.h>  #include <linux/virtio_scsi.h>  #include <linux/cpu.h> +#include <linux/blkdev.h>  #include <scsi/scsi_host.h>  #include <scsi/scsi_device.h>  #include <scsi/scsi_cmnd.h> @@ -37,6 +38,7 @@ struct virtio_scsi_cmd {  	struct completion *comp;  	union {  		struct virtio_scsi_cmd_req       cmd; +		struct virtio_scsi_cmd_req_pi    cmd_pi;  		struct virtio_scsi_ctrl_tmf_req  tmf;  		struct virtio_scsi_ctrl_an_req   an;  	} req; @@ -73,17 +75,12 @@ struct virtio_scsi_vq {   * queue, and also lets the driver optimize the IRQ affinity for the virtqueues   * (each virtqueue's affinity is set to the CPU that "owns" the queue).   
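The ufshci.h hunk above completes the endianness sweep begun in ufs.h: every wire-visible field is retyped as __le16/__le32 (or __be16/__be32 on the UPIU side), so sparse ("make C=1") flags any access that skips a byte-swap helper. The convention is to convert exactly at the CPU/device boundary, for example (field names taken from the structures above, variables illustrative):

	/* CPU to device: transfer descriptor fields are little-endian */
	utrd->prd_table_length = cpu_to_le16(sg_segments);

	/* device to CPU: swap to host order before masking */
	ocs = le32_to_cpu(utrd->header.dword_2) & MASK_OCS;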
* - * An interesting effect of this policy is that only writes to req_vq need to - * take the tgt_lock.  Read can be done outside the lock because: + * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq + * could be done locklessly, but we do not do it yet.   * - * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1. - *   In that case, no other CPU is reading req_vq: even if they were in - *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock. - * - * - reads of req_vq only occur when the target is not idle (reqs != 0). - *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq. - * - * Similarly, decrements of reqs are never concurrent with writes of req_vq. + * Decrements of reqs are never concurrent with writes of req_vq: before the + * decrement reqs will be != 0; after the decrement the virtqueue completion + * routine will not use the req_vq so it can be changed by a new request.   * Thus they can happen outside the tgt_lock, provided of course we make reqs   * an atomic_t.   */ @@ -204,7 +201,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)  			set_driver_byte(sc, DRIVER_SENSE);  	} -	mempool_free(cmd, virtscsi_cmd_pool);  	sc->scsi_done(sc);  	atomic_dec(&tgt->reqs); @@ -224,6 +220,9 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,  		virtqueue_disable_cb(vq);  		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)  			fn(vscsi, buf); + +		if (unlikely(virtqueue_is_broken(vq))) +			break;  	} while (!virtqueue_enable_cb(vq));  	spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);  } @@ -235,49 +234,25 @@ static void virtscsi_req_done(struct virtqueue *vq)  	int index = vq->index - VIRTIO_SCSI_VQ_BASE;  	struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; -	/* -	 * Read req_vq before decrementing the reqs field in -	 * virtscsi_complete_cmd. -	 * -	 * With barriers: -	 * -	 * 	CPU #0			virtscsi_queuecommand_multi (CPU #1) -	 * 	------------------------------------------------------------ -	 * 	lock vq_lock -	 * 	read req_vq -	 * 	read reqs (reqs = 1) -	 * 	write reqs (reqs = 0) -	 * 				increment reqs (reqs = 1) -	 * 				write req_vq -	 * -	 * Possible reordering without barriers: -	 * -	 * 	CPU #0			virtscsi_queuecommand_multi (CPU #1) -	 * 	------------------------------------------------------------ -	 * 	lock vq_lock -	 * 	read reqs (reqs = 1) -	 * 	write reqs (reqs = 0) -	 * 				increment reqs (reqs = 1) -	 * 				write req_vq -	 * 	read (wrong) req_vq -	 * -	 * We do not need a full smp_rmb, because req_vq is required to get -	 * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored -	 * in the virtqueue as the user token. 
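With the barrier argument gone, the rule the rewritten comment states is simply that req_vq is only touched under tgt_lock. The selection step in virtscsi_pick_vq() (quoted further below) then reduces to the following sketch, where the modulo stands in for the driver's subtraction loop:

	spin_lock_irqsave(&tgt->tgt_lock, flags);
	if (atomic_inc_return(&tgt->reqs) > 1) {
		vq = tgt->req_vq;	/* busy target: keep its bound queue */
	} else {
		/* idle target: rebind it to the submitting CPU's queue */
		queue_num = smp_processor_id() % vscsi->num_queues;
		vq = tgt->req_vq = &vscsi->req_vqs[queue_num];
	}
	spin_unlock_irqrestore(&tgt->tgt_lock, flags);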
-	 */ -	smp_read_barrier_depends(); -  	virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);  }; +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ +	int i, num_vqs; + +	num_vqs = vscsi->num_queues; +	for (i = 0; i < num_vqs; i++) +		virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], +				 virtscsi_complete_cmd); +} +  static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)  {  	struct virtio_scsi_cmd *cmd = buf;  	if (cmd->comp)  		complete_all(cmd->comp); -	else -		mempool_free(cmd, virtscsi_cmd_pool);  }  static void virtscsi_ctrl_done(struct virtqueue *vq) @@ -288,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)  	virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);  }; +static void virtscsi_handle_event(struct work_struct *work); +  static int virtscsi_kick_event(struct virtio_scsi *vscsi,  			       struct virtio_scsi_event_node *event_node)  { @@ -295,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,  	struct scatterlist sg;  	unsigned long flags; +	INIT_WORK(&event_node->work, virtscsi_handle_event);  	sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));  	spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); @@ -412,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)  {  	struct virtio_scsi_event_node *event_node = buf; -	INIT_WORK(&event_node->work, virtscsi_handle_event);  	schedule_work(&event_node->work);  } @@ -430,14 +407,13 @@ static void virtscsi_event_done(struct virtqueue *vq)   * @cmd		: command structure   * @req_size	: size of the request buffer   * @resp_size	: size of the response buffer - * @gfp	: flags to use for memory allocations   */  static int virtscsi_add_cmd(struct virtqueue *vq,  			    struct virtio_scsi_cmd *cmd, -			    size_t req_size, size_t resp_size, gfp_t gfp) +			    size_t req_size, size_t resp_size)  {  	struct scsi_cmnd *sc = cmd->sc; -	struct scatterlist *sgs[4], req, resp; +	struct scatterlist *sgs[6], req, resp;  	struct sg_table *out, *in;  	unsigned out_num = 0, in_num = 0; @@ -455,30 +431,38 @@ static int virtscsi_add_cmd(struct virtqueue *vq,  	sgs[out_num++] = &req;  	/* Data-out buffer.  */ -	if (out) +	if (out) { +		/* Place WRITE protection SGLs before Data OUT payload */ +		if (scsi_prot_sg_count(sc)) +			sgs[out_num++] = scsi_prot_sglist(sc);  		sgs[out_num++] = out->sgl; +	}  	/* Response header.  
*/  	sg_init_one(&resp, &cmd->resp, resp_size);  	sgs[out_num + in_num++] = &resp;  	/* Data-in buffer */ -	if (in) +	if (in) { +		/* Place READ protection SGLs before Data IN payload */ +		if (scsi_prot_sg_count(sc)) +			sgs[out_num + in_num++] = scsi_prot_sglist(sc);  		sgs[out_num + in_num++] = in->sgl; +	} -	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp); +	return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC);  }  static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,  			     struct virtio_scsi_cmd *cmd, -			     size_t req_size, size_t resp_size, gfp_t gfp) +			     size_t req_size, size_t resp_size)  {  	unsigned long flags;  	int err;  	bool needs_kick = false;  	spin_lock_irqsave(&vq->vq_lock, flags); -	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp); +	err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size);  	if (!err)  		needs_kick = virtqueue_kick_prepare(vq->vq); @@ -489,14 +473,46 @@ static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,  	return err;  } +static void virtio_scsi_init_hdr(struct virtio_scsi_cmd_req *cmd, +				 struct scsi_cmnd *sc) +{ +	cmd->lun[0] = 1; +	cmd->lun[1] = sc->device->id; +	cmd->lun[2] = (sc->device->lun >> 8) | 0x40; +	cmd->lun[3] = sc->device->lun & 0xff; +	cmd->tag = (unsigned long)sc; +	cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; +	cmd->prio = 0; +	cmd->crn = 0; +} + +static void virtio_scsi_init_hdr_pi(struct virtio_scsi_cmd_req_pi *cmd_pi, +				    struct scsi_cmnd *sc) +{ +	struct request *rq = sc->request; +	struct blk_integrity *bi; + +	virtio_scsi_init_hdr((struct virtio_scsi_cmd_req *)cmd_pi, sc); + +	if (!rq || !scsi_prot_sg_count(sc)) +		return; + +	bi = blk_get_integrity(rq->rq_disk); + +	if (sc->sc_data_direction == DMA_TO_DEVICE) +		cmd_pi->pi_bytesout = blk_rq_sectors(rq) * bi->tuple_size; +	else if (sc->sc_data_direction == DMA_FROM_DEVICE) +		cmd_pi->pi_bytesin = blk_rq_sectors(rq) * bi->tuple_size; +} +  static int virtscsi_queuecommand(struct virtio_scsi *vscsi,  				 struct virtio_scsi_vq *req_vq,  				 struct scsi_cmnd *sc)  { -	struct virtio_scsi_cmd *cmd; -	int ret; -  	struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); +	struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); +	int req_size; +  	BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);  	/* TODO: check feature bit and fail if unsupported?  
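virtio_scsi_init_hdr() above fills the 8-byte LUN field using SAM flat-space addressing: byte 0 is fixed at 1, byte 1 carries the target id, and bytes 2-3 carry the LUN with 0x40 set in the high byte. Worked through for target 2, LUN 5:

	u8 lun[8] = { 0 };

	lun[0] = 1;			/* fixed by the virtio-scsi spec */
	lun[1] = 2;			/* target id */
	lun[2] = (5 >> 8) | 0x40;	/* 0x40 selects flat-space addressing */
	lun[3] = 5 & 0xff;		/* low byte of the LUN */
	/* resulting field: 01 02 40 05 00 00 00 00 */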
*/ @@ -505,36 +521,24 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,  	dev_dbg(&sc->device->sdev_gendev,  		"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); -	ret = SCSI_MLQUEUE_HOST_BUSY; -	cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC); -	if (!cmd) -		goto out; -  	memset(cmd, 0, sizeof(*cmd));  	cmd->sc = sc; -	cmd->req.cmd = (struct virtio_scsi_cmd_req){ -		.lun[0] = 1, -		.lun[1] = sc->device->id, -		.lun[2] = (sc->device->lun >> 8) | 0x40, -		.lun[3] = sc->device->lun & 0xff, -		.tag = (unsigned long)sc, -		.task_attr = VIRTIO_SCSI_S_SIMPLE, -		.prio = 0, -		.crn = 0, -	};  	BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); -	memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); -	if (virtscsi_kick_cmd(req_vq, cmd, -			      sizeof cmd->req.cmd, sizeof cmd->resp.cmd, -			      GFP_ATOMIC) == 0) -		ret = 0; -	else -		mempool_free(cmd, virtscsi_cmd_pool); +	if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { +		virtio_scsi_init_hdr_pi(&cmd->req.cmd_pi, sc); +		memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); +		req_size = sizeof(cmd->req.cmd_pi); +	} else { +		virtio_scsi_init_hdr(&cmd->req.cmd, sc); +		memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); +		req_size = sizeof(cmd->req.cmd); +	} -out: -	return ret; +	if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0) +		return SCSI_MLQUEUE_HOST_BUSY; +	return 0;  }  static int virtscsi_queuecommand_single(struct Scsi_Host *sh, @@ -557,12 +561,8 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,  	spin_lock_irqsave(&tgt->tgt_lock, flags); -	/* -	 * The memory barrier after atomic_inc_return matches -	 * the smp_read_barrier_depends() in virtscsi_req_done. -	 */  	if (atomic_inc_return(&tgt->reqs) > 1) -		vq = ACCESS_ONCE(tgt->req_vq); +		vq = tgt->req_vq;  	else {  		queue_num = smp_processor_id();  		while (unlikely(queue_num >= vscsi->num_queues)) @@ -593,8 +593,7 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)  	cmd->comp = &comp;  	if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, -			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf, -			      GFP_NOIO) < 0) +			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0)  		goto out;  	wait_for_completion(&comp); @@ -602,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)  	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)  		ret = SUCCESS; +	/* +	 * The spec guarantees that all requests related to the TMF have +	 * been completed, but the callback might not have run yet if +	 * we're using independent interrupts (e.g. MSI).  Poll the +	 * virtqueues once. +	 * +	 * In the abort case, sc->scsi_done will do nothing, because +	 * the block layer must have detected a timeout and as a result +	 * REQ_ATOM_COMPLETE has been set. 
+	 */ +	virtscsi_poll_requests(vscsi); +  out:  	mempool_free(cmd, virtscsi_cmd_pool);  	return ret; @@ -680,6 +691,7 @@ static struct scsi_host_template virtscsi_host_template_single = {  	.name = "Virtio SCSI HBA",  	.proc_name = "virtio_scsi",  	.this_id = -1, +	.cmd_size = sizeof(struct virtio_scsi_cmd),  	.queuecommand = virtscsi_queuecommand_single,  	.eh_abort_handler = virtscsi_abort,  	.eh_device_reset_handler = virtscsi_device_reset, @@ -696,6 +708,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {  	.name = "Virtio SCSI HBA",  	.proc_name = "virtio_scsi",  	.this_id = -1, +	.cmd_size = sizeof(struct virtio_scsi_cmd),  	.queuecommand = virtscsi_queuecommand_multi,  	.eh_abort_handler = virtscsi_abort,  	.eh_device_reset_handler = virtscsi_device_reset, @@ -710,19 +723,15 @@ static struct scsi_host_template virtscsi_host_template_multi = {  #define virtscsi_config_get(vdev, fld) \  	({ \  		typeof(((struct virtio_scsi_config *)0)->fld) __val; \ -		vdev->config->get(vdev, \ -				  offsetof(struct virtio_scsi_config, fld), \ -				  &__val, sizeof(__val)); \ +		virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \  		__val; \  	})  #define virtscsi_config_set(vdev, fld, val) \ -	(void)({ \ +	do { \  		typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ -		vdev->config->set(vdev, \ -				  offsetof(struct virtio_scsi_config, fld), \ -				  &__val, sizeof(__val)); \ -	}) +		virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ +	} while(0)  static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)  { @@ -751,8 +760,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)  		vscsi->affinity_hint_set = true;  	} else { -		for (i = 0; i < vscsi->num_queues; i++) +		for (i = 0; i < vscsi->num_queues; i++) { +			if (!vscsi->req_vqs[i].vq) +				continue; +  			virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); +		}  		vscsi->affinity_hint_set = false;  	} @@ -872,7 +885,7 @@ static int virtscsi_probe(struct virtio_device *vdev)  {  	struct Scsi_Host *shost;  	struct virtio_scsi *vscsi; -	int err; +	int err, host_prot;  	u32 sg_elems, num_targets;  	u32 cmd_per_lun;  	u32 num_queues; @@ -922,6 +935,16 @@ static int virtscsi_probe(struct virtio_device *vdev)  	shost->max_id = num_targets;  	shost->max_channel = 0;  	shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + +	if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { +		host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | +			    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | +			    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + +		scsi_host_set_prot(shost, host_prot); +		scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); +	} +  	err = scsi_add_host(shost, &vdev->dev);  	if (err)  		goto scsi_add_host_failed; @@ -954,9 +977,13 @@ static void virtscsi_remove(struct virtio_device *vdev)  	scsi_host_put(shost);  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int virtscsi_freeze(struct virtio_device *vdev)  { +	struct Scsi_Host *sh = virtio_scsi_host(vdev); +	struct virtio_scsi *vscsi = shost_priv(sh); + +	unregister_hotcpu_notifier(&vscsi->nb);  	virtscsi_remove_vqs(vdev);  	return 0;  } @@ -965,8 +992,17 @@ static int virtscsi_restore(struct virtio_device *vdev)  {  	struct Scsi_Host *sh = virtio_scsi_host(vdev);  	struct virtio_scsi *vscsi = shost_priv(sh); +	int err; + +	err = virtscsi_init(vdev, vscsi); +	if (err) +		return err; -	return virtscsi_init(vdev, vscsi); +	err = register_hotcpu_notifier(&vscsi->nb); 
+	if (err) +		vdev->config->del_vqs(vdev); + +	return err;  }  #endif @@ -978,6 +1014,7 @@ static struct virtio_device_id id_table[] = {  static unsigned int features[] = {  	VIRTIO_SCSI_F_HOTPLUG,  	VIRTIO_SCSI_F_CHANGE, +	VIRTIO_SCSI_F_T10_PI,  };  static struct virtio_driver virtio_scsi_driver = { @@ -988,7 +1025,7 @@ static struct virtio_driver virtio_scsi_driver = {  	.id_table = id_table,  	.probe = virtscsi_probe,  	.scan = virtscsi_scan, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  	.freeze = virtscsi_freeze,  	.restore = virtscsi_restore,  #endif diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 3bfaa66fa0d..c88e1468aad 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -1,7 +1,7 @@  /*   * Linux driver for VMware's para-virtualized SCSI HBA.   * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.   *   * This program is free software; you can redistribute it and/or modify it   * under the terms of the GNU General Public License as published by the @@ -32,6 +32,7 @@  #include <scsi/scsi_host.h>  #include <scsi/scsi_cmnd.h>  #include <scsi/scsi_device.h> +#include <scsi/scsi_tcq.h>  #include "vmw_pvscsi.h" @@ -44,7 +45,7 @@ MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);  #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8  #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1 -#define PVSCSI_DEFAULT_QUEUE_DEPTH		64 +#define PVSCSI_DEFAULT_QUEUE_DEPTH		254  #define SGL_SIZE				PAGE_SIZE  struct pvscsi_sg_list { @@ -62,6 +63,7 @@ struct pvscsi_ctx {  	dma_addr_t		dataPA;  	dma_addr_t		sensePA;  	dma_addr_t		sglPA; +	struct completion	*abort_cmp;  };  struct pvscsi_adapter { @@ -71,6 +73,7 @@ struct pvscsi_adapter {  	bool				use_msi;  	bool				use_msix;  	bool				use_msg; +	bool				use_req_threshold;  	spinlock_t			hw_lock; @@ -102,18 +105,22 @@ struct pvscsi_adapter {  /* Command line parameters */ -static int pvscsi_ring_pages     = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; +static int pvscsi_ring_pages;  static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;  static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;  static bool pvscsi_disable_msi;  static bool pvscsi_disable_msix;  static bool pvscsi_use_msg       = true; +static bool pvscsi_use_req_threshold = true;  #define PVSCSI_RW (S_IRUSR | S_IWUSR)  module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);  MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" -		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); +		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) +		 "[up to 16 targets]," +		 __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES) +		 "[for 16+ targets])");  module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);  MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" @@ -121,7 +128,7 @@ MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="  module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);  MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" -		 __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")"); +		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");  module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);  MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); @@ -132,6 +139,10 @@ MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");  module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);  MODULE_PARM_DESC(use_msg, 
"Use msg ring when available - (default=1)"); +module_param_named(use_req_threshold, pvscsi_use_req_threshold, +		   bool, PVSCSI_RW); +MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)"); +  static const struct pci_device_id pvscsi_pci_tbl[] = {  	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },  	{ 0 } @@ -177,6 +188,7 @@ static void pvscsi_release_context(struct pvscsi_adapter *adapter,  				   struct pvscsi_ctx *ctx)  {  	ctx->cmd = NULL; +	ctx->abort_cmp = NULL;  	list_add(&ctx->list, &adapter->cmd_pool);  } @@ -280,10 +292,15 @@ static int scsi_is_rw(unsigned char op)  static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,  			   unsigned char op)  { -	if (scsi_is_rw(op)) -		pvscsi_kick_rw_io(adapter); -	else +	if (scsi_is_rw(op)) { +		struct PVSCSIRingsState *s = adapter->rings_state; + +		if (!adapter->use_req_threshold || +		    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold) +			pvscsi_kick_rw_io(adapter); +	} else {  		pvscsi_process_request_ring(adapter); +	}  }  static void ll_adapter_reset(const struct pvscsi_adapter *adapter) @@ -487,6 +504,35 @@ static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)  	}  } +static int pvscsi_change_queue_depth(struct scsi_device *sdev, +				     int qdepth, +				     int reason) +{ +	int max_depth; +	struct Scsi_Host *shost = sdev->host; + +	if (reason != SCSI_QDEPTH_DEFAULT) +		/* +		 * We support only changing default. +		 */ +		return -EOPNOTSUPP; + +	max_depth = shost->can_queue; +	if (!sdev->tagged_supported) +		max_depth = 1; +	if (qdepth > max_depth) +		qdepth = max_depth; +	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); + +	if (sdev->inquiry_len > 7) +		sdev_printk(KERN_INFO, sdev, +			    "qdepth(%d), tagged(%d), simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", +			    sdev->queue_depth, sdev->tagged_supported, +			    sdev->simple_tags, sdev->ordered_tags, +			    sdev->scsi_level, (sdev->inquiry[7] & 2) >> 1); +	return sdev->queue_depth; +} +  /*   * Pull a completion descriptor off and pass the completion back   * to the SCSI mid layer. @@ -496,15 +542,27 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,  {  	struct pvscsi_ctx *ctx;  	struct scsi_cmnd *cmd; +	struct completion *abort_cmp;  	u32 btstat = e->hostStatus;  	u32 sdstat = e->scsiStatus;  	ctx = pvscsi_get_context(adapter, e->context);  	cmd = ctx->cmd; +	abort_cmp = ctx->abort_cmp;  	pvscsi_unmap_buffers(adapter, ctx);  	pvscsi_release_context(adapter, ctx); -	cmd->result = 0; +	if (abort_cmp) { +		/* +		 * The command was requested to be aborted. Just signal that +		 * the request completed and swallow the actual cmd completion +		 * here. The abort handler will post a completion for this +		 * command indicating that it got successfully aborted. 
+		 */ +		complete(abort_cmp); +		return; +	} +	cmd->result = 0;  	if (sdstat != SAM_STAT_GOOD &&  	    (btstat == BTSTAT_SUCCESS ||  	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED || @@ -726,6 +784,8 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)  	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);  	struct pvscsi_ctx *ctx;  	unsigned long flags; +	int result = SUCCESS; +	DECLARE_COMPLETION_ONSTACK(abort_cmp);  	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",  		    adapter->host->host_no, cmd); @@ -748,13 +808,40 @@ static int pvscsi_abort(struct scsi_cmnd *cmd)  		goto out;  	} +	/* +	 * Mark that the command has been requested to be aborted and issue +	 * the abort. +	 */ +	ctx->abort_cmp = &abort_cmp; +  	pvscsi_abort_cmd(adapter, ctx); +	spin_unlock_irqrestore(&adapter->hw_lock, flags); +	/* Wait for 2 secs for the completion. */ +	wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)); +	spin_lock_irqsave(&adapter->hw_lock, flags); -	pvscsi_process_completion_ring(adapter); +	if (!completion_done(&abort_cmp)) { +		/* +		 * Failed to abort the command, unmark the fact that it +		 * was requested to be aborted. +		 */ +		ctx->abort_cmp = NULL; +		result = FAILED; +		scmd_printk(KERN_DEBUG, cmd, +			    "Failed to get completion for aborted cmd %p\n", +			    cmd); +		goto out; +	} + +	/* +	 * Successfully aborted the command. +	 */ +	cmd->result = (DID_ABORT << 16); +	cmd->scsi_done(cmd);  out:  	spin_unlock_irqrestore(&adapter->hw_lock, flags); -	return SUCCESS; +	return result;  }  /* @@ -911,6 +998,7 @@ static struct scsi_host_template pvscsi_template = {  	.dma_boundary			= UINT_MAX,  	.max_sectors			= 0xffff,  	.use_clustering			= ENABLE_CLUSTERING, +	.change_queue_depth		= pvscsi_change_queue_depth,  	.eh_abort_handler		= pvscsi_abort,  	.eh_device_reset_handler	= pvscsi_device_reset,  	.eh_bus_reset_handler		= pvscsi_bus_reset, @@ -1034,6 +1122,34 @@ static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)  	return 1;  } +static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter, +				      bool enable) +{ +	u32 val; + +	if (!pvscsi_use_req_threshold) +		return false; + +	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, +			 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD); +	val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS); +	if (val == -1) { +		printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n"); +		return false; +	} else { +		struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 }; +		cmd_msg.enable = enable; +		printk(KERN_INFO +		       "vmw_pvscsi: %sabling reqCallThreshold\n", +			enable ? 
"en" : "dis"); +		pvscsi_write_cmd_desc(adapter, +				      PVSCSI_CMD_SETUP_REQCALLTHRESHOLD, +				      &cmd_msg, sizeof(cmd_msg)); +		return pvscsi_reg_read(adapter, +				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0; +	} +} +  static irqreturn_t pvscsi_isr(int irq, void *devp)  {  	struct pvscsi_adapter *adapter = devp; @@ -1236,11 +1352,12 @@ exit:  static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)  {  	struct pvscsi_adapter *adapter; -	struct Scsi_Host *host; -	struct device *dev; +	struct pvscsi_adapter adapter_temp; +	struct Scsi_Host *host = NULL;  	unsigned int i;  	unsigned long flags = 0;  	int error; +	u32 max_id;  	error = -ENODEV; @@ -1258,34 +1375,19 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)  		goto out_disable_device;  	} -	pvscsi_template.can_queue = -		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * -		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; -	pvscsi_template.cmd_per_lun = -		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); -	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); -	if (!host) { -		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); -		goto out_disable_device; -	} - -	adapter = shost_priv(host); +	/* +	 * Let's use a temp pvscsi_adapter struct until we find the number of +	 * targets on the adapter, after that we will switch to the real +	 * allocated struct. +	 */ +	adapter = &adapter_temp;  	memset(adapter, 0, sizeof(*adapter));  	adapter->dev  = pdev; -	adapter->host = host; - -	spin_lock_init(&adapter->hw_lock); - -	host->max_channel = 0; -	host->max_id      = 16; -	host->max_lun     = 1; -	host->max_cmd_len = 16; -  	adapter->rev = pdev->revision;  	if (pci_request_regions(pdev, "vmw_pvscsi")) {  		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n"); -		goto out_free_host; +		goto out_disable_device;  	}  	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -1301,7 +1403,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)  	if (i == DEVICE_COUNT_RESOURCE) {  		printk(KERN_ERR  		       "vmw_pvscsi: adapter has no suitable MMIO region\n"); -		goto out_release_resources; +		goto out_release_resources_and_disable;  	}  	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); @@ -1310,10 +1412,60 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)  		printk(KERN_ERR  		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",  		       i, PVSCSI_MEM_SPACE_SIZE); -		goto out_release_resources; +		goto out_release_resources_and_disable;  	}  	pci_set_master(pdev); + +	/* +	 * Ask the device for max number of targets before deciding the +	 * default pvscsi_ring_pages value. +	 */ +	max_id = pvscsi_get_max_targets(adapter); +	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id); + +	if (pvscsi_ring_pages == 0) +		/* +		 * Set the right default value. Up to 16 it is 8, above it is +		 * max. +		 */ +		pvscsi_ring_pages = (max_id > 16) ? 
+			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES : +			PVSCSI_DEFAULT_NUM_PAGES_PER_RING; +	printk(KERN_INFO +	       "vmw_pvscsi: setting ring_pages to %d\n", +	       pvscsi_ring_pages); + +	pvscsi_template.can_queue = +		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * +		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; +	pvscsi_template.cmd_per_lun = +		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); +	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); +	if (!host) { +		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); +		goto out_release_resources_and_disable; +	} + +	/* +	 * Let's use the real pvscsi_adapter struct here onwards. +	 */ +	adapter = shost_priv(host); +	memset(adapter, 0, sizeof(*adapter)); +	adapter->dev  = pdev; +	adapter->host = host; +	/* +	 * Copy back what we already have to the allocated adapter struct. +	 */ +	adapter->rev = adapter_temp.rev; +	adapter->mmioBase = adapter_temp.mmioBase; + +	spin_lock_init(&adapter->hw_lock); +	host->max_channel = 0; +	host->max_lun     = 1; +	host->max_cmd_len = 16; +	host->max_id      = max_id; +  	pci_set_drvdata(pdev, host);  	ll_adapter_reset(adapter); @@ -1327,13 +1479,6 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)  	}  	/* -	 * Ask the device for max number of targets. -	 */ -	host->max_id = pvscsi_get_max_targets(adapter); -	dev = pvscsi_dev(adapter); -	dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id); - -	/*  	 * From this point on we should reset the adapter if anything goes  	 * wrong.  	 */ @@ -1373,6 +1518,10 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)  		flags = IRQF_SHARED;  	} +	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); +	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n", +	       adapter->use_req_threshold ? "en" : "dis"); +  	error = request_irq(adapter->irq, pvscsi_isr, flags,  			    "vmw_pvscsi", adapter);  	if (error) { @@ -1402,13 +1551,15 @@ out_reset_adapter:  	ll_adapter_reset(adapter);  out_release_resources:  	pvscsi_release_resources(adapter); -out_free_host:  	scsi_host_put(host);  out_disable_device: -	pci_set_drvdata(pdev, NULL);  	pci_disable_device(pdev);  	return error; + +out_release_resources_and_disable: +	pvscsi_release_resources(adapter); +	goto out_disable_device;  }  static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) @@ -1445,7 +1596,6 @@ static void pvscsi_remove(struct pci_dev *pdev)  	scsi_host_put(host); -	pci_set_drvdata(pdev, NULL);  	pci_disable_device(pdev);  } diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 3546e8662e3..ce458885127 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -1,7 +1,7 @@  /*   * VMware PVSCSI header file   * - * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.   
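
The pvscsi_abort() rework above is a handshake: the error handler arms a per-command completion (ctx->abort_cmp), issues the abort, drops hw_lock so the completion path can run, and waits up to two seconds; pvscsi_complete_request() swallows the command's normal completion and signals the waiter instead, and on timeout the handler un-arms the completion and returns FAILED. The stand-alone C model below sketches the same protocol with POSIX threads standing in for the kernel's spinlock and completion primitives; every name in it is illustrative, none of it is driver code, and it assumes the abort is armed before the simulated device completes. It builds with cc -pthread.

/* Model of the abort handshake; pthreads replace spinlock/completion. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct model_ctx {
	pthread_mutex_t lock;   /* plays the role of adapter->hw_lock  */
	pthread_cond_t cond;    /* plays the role of struct completion */
	bool done;              /* completion_done()                   */
	bool abort_armed;       /* ctx->abort_cmp != NULL              */
};

/* Completion-ring side: swallow the completion, wake the abort waiter. */
static void model_complete_request(struct model_ctx *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->abort_armed) {
		c->done = true;
		pthread_cond_signal(&c->cond);  /* complete(abort_cmp) */
	}
	pthread_mutex_unlock(&c->lock);
}

/* eh_abort side: true maps to SUCCESS, false to FAILED. */
static bool model_abort(struct model_ctx *c)
{
	struct timespec deadline;
	bool ok = true;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;           /* the patch waits 2000 ms */

	pthread_mutex_lock(&c->lock);
	c->abort_armed = true;          /* arm, then issue the abort */
	/* cond_timedwait releases the lock while sleeping, mirroring the
	 * unlock/lock pair around wait_for_completion_timeout(). */
	while (!c->done && ok)
		if (pthread_cond_timedwait(&c->cond, &c->lock, &deadline))
			ok = false;
	if (!ok)
		c->abort_armed = false; /* timed out: un-arm the abort */
	pthread_mutex_unlock(&c->lock);
	return ok;                      /* caller finishes with DID_ABORT */
}

static void *model_device(void *arg)
{
	struct timespec d = { 0, 100 * 1000 * 1000 };  /* 100 ms busy */

	nanosleep(&d, NULL);
	model_complete_request(arg);
	return NULL;
}

int main(void)
{
	struct model_ctx c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t dev;

	pthread_create(&dev, NULL, model_device, &c);
	printf("abort: %s\n", model_abort(&c) ? "SUCCESS" : "FAILED");
	pthread_join(dev, NULL);
	return 0;
}
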
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index 3546e8662e3..ce458885127 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -1,7 +1,7 @@
 /*
  * VMware PVSCSI header file
  *
- * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -26,7 +26,7 @@
 
 #include <linux/types.h>
 
-#define PVSCSI_DRIVER_VERSION_STRING   "1.0.2.0-k"
+#define PVSCSI_DRIVER_VERSION_STRING   "1.0.5.0-k"
 
 #define PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT 128
 
@@ -117,8 +117,9 @@ enum PVSCSICommands {
 	PVSCSI_CMD_CONFIG            = 7,
 	PVSCSI_CMD_SETUP_MSG_RING    = 8,
 	PVSCSI_CMD_DEVICE_UNPLUG     = 9,
+	PVSCSI_CMD_SETUP_REQCALLTHRESHOLD     = 10,
 
-	PVSCSI_CMD_LAST              = 10  /* has to be last */
+	PVSCSI_CMD_LAST              = 11  /* has to be last */
 };
 
 /*
@@ -141,6 +142,14 @@ struct PVSCSICmdDescConfigCmd {
 	u32 _pad;
 } __packed;
 
+/*
+ * Command descriptor for PVSCSI_CMD_SETUP_REQCALLTHRESHOLD --
+ */
+
+struct PVSCSICmdDescSetupReqCall {
+	u32 enable;
+} __packed;
+
 enum PVSCSIConfigPageType {
 	PVSCSI_CONFIG_PAGE_CONTROLLER = 0x1958,
 	PVSCSI_CONFIG_PAGE_PHY        = 0x1959,
@@ -261,7 +270,9 @@ struct PVSCSIRingsState {
 	u32	cmpConsIdx;
 	u32	cmpNumEntriesLog2;
 
-	u8	_pad[104];
+	u32	reqCallThreshold;
+
+	u8	_pad[100];
 
 	u32	msgProdIdx;
 	u32	msgConsIdx;
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index f9a6e4b0aff..32674236fec 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1252,7 +1252,7 @@ static int wd7000_init(Adapter * host)
 
 		return 0;
 
-	if (request_irq(host->irq, wd7000_intr, IRQF_DISABLED, "wd7000", host)) {
+	if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
 		printk("wd7000_init: can't get IRQ %d.\n", host->irq);
 		return (0);
 	}
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index cbf3476c68c..aff31991aea 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -104,7 +104,7 @@ static int zorro7xx_init_one(struct zorro_dev *z,
 	if (ioaddr > 0x01000000)
 		hostdata->base = ioremap(ioaddr, zorro_resource_len(z));
 	else
-		hostdata->base = (void __iomem *)ZTWO_VADDR(ioaddr);
+		hostdata->base = ZTWO_VADDR(ioaddr);
 
 	hostdata->clock = 50;
 	hostdata->chip710 = 1;
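
Across the two vmw_pvscsi hunks above, request coalescing works like this: the shared ring-state page gains a reqCallThreshold field, the device advertises support via PVSCSI_CMD_SETUP_REQCALLTHRESHOLD (a command-status readback of all-ones means unsupported, in which case the driver keeps kicking on every request), and pvscsi_kick_io() then rings the doorbell for READ/WRITE commands only once the count of unconsumed requests reaches the threshold, saving doorbell writes and hence VM exits. Below is a minimal sketch of just that producer-side test, using a stand-in struct for the shared ring state; the field names match the header hunk, everything else is illustrative.

/* The coalesced-kick test from pvscsi_kick_io(), lifted out for testing. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct ring_state {
	uint32_t reqProdIdx;       /* bumped by the driver per request   */
	uint32_t reqConsIdx;       /* advanced by the device             */
	uint32_t reqCallThreshold; /* set via SETUP_REQCALLTHRESHOLD     */
};

static bool should_kick_rw_io(const struct ring_state *s,
			      bool use_req_threshold)
{
	/*
	 * Unsigned subtraction stays correct across index wraparound:
	 * it always yields the number of not-yet-consumed requests.
	 */
	return !use_req_threshold ||
	       s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold;
}

int main(void)
{
	struct ring_state s = { .reqCallThreshold = 8 };

	s.reqProdIdx = 4;               /* 4 outstanding: hold the doorbell */
	assert(!should_kick_rw_io(&s, true));
	s.reqProdIdx = 12;              /* 12 outstanding: kick             */
	assert(should_kick_rw_io(&s, true));

	s.reqProdIdx = 5;               /* producer wrapped past 2^32...    */
	s.reqConsIdx = UINT32_MAX - 2;  /* ...still 8 outstanding: kick     */
	assert(should_kick_rw_io(&s, true));

	assert(should_kick_rw_io(&s, false)); /* coalescing off: always kick */
	return 0;
}
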

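One easy-to-miss detail in the PVSCSIRingsState hunk: _pad shrinks from 104 to 100 bytes, exactly the size of the new u32, so the MSG-ring indices that follow keep their device-visible offsets. The check below demonstrates that invariant with truncated stand-in structs; the real struct has more leading fields (and is packed), which changes the absolute offsets but not the before/after equality being asserted.

/* Offset-preservation check for the reqCallThreshold layout change. */
#include <stddef.h>
#include <stdint.h>

/* Tail of struct PVSCSIRingsState before the patch... */
struct rings_state_old {
	uint32_t cmpConsIdx;
	uint32_t cmpNumEntriesLog2;
	uint8_t  _pad[104];
	uint32_t msgProdIdx;
};

/* ...and after: a u32 is added, paid for by shrinking the pad. */
struct rings_state_new {
	uint32_t cmpConsIdx;
	uint32_t cmpNumEntriesLog2;
	uint32_t reqCallThreshold;
	uint8_t  _pad[100];
	uint32_t msgProdIdx;
};

/* The device reads msgProdIdx at a fixed offset; it must not move. */
_Static_assert(offsetof(struct rings_state_old, msgProdIdx) ==
	       offsetof(struct rings_state_new, msgProdIdx),
	       "msgProdIdx moved; the device-visible layout would break");

int main(void)
{
	return 0;
}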